summaryrefslogtreecommitdiff
path: root/power
diff options
context:
space:
mode:
Diffstat (limited to 'power')
-rw-r--r--power/intel_x86.c40
1 file changed, 26 insertions, 14 deletions
diff --git a/power/intel_x86.c b/power/intel_x86.c
index a4de71ff5f..8457c3bb0a 100644
--- a/power/intel_x86.c
+++ b/power/intel_x86.c
@@ -340,23 +340,35 @@ void chipset_throttle_cpu(int throttle)
enum power_state power_chipset_init(void)
{
+ CPRINTS("%s: power_signal=0x%x", __func__, power_get_signals());
+
+ if (!system_jumped_to_this_image())
+ return POWER_G3;
/*
- * If we're switching between images without rebooting, see if the x86
- * is already powered on; if so, leave it there instead of cycling
- * through G3.
+ * We are here as RW. We need to handle the following cases:
+ *
+ * 1. Late sysjump by software sync. AP is in S0.
+ * 2. Shutting down in recovery mode then sysjump by EFS2. AP is in S5
+ * and expected to sequence down.
+ * 3. Rebooting from recovery mode then sysjump by EFS2. AP is in S5
+ * and expected to sequence up.
+ * 4. RO jumps to RW from main() by EFS2. (a.k.a. power on reset, cold
+ * reset). AP is in G3.
*/
- if (system_jumped_to_this_image()) {
- if ((power_get_signals() & IN_ALL_S0) == IN_ALL_S0) {
- /* Disable idle task deep sleep when in S0. */
- disable_sleep(SLEEP_MASK_AP_RUN);
- CPRINTS("already in S0");
- return POWER_S0;
- }
-
- /* Force all signals to their G3 states */
- chipset_force_g3();
+ if ((power_get_signals() & IN_ALL_S0) == IN_ALL_S0) {
+ /* case #1. Disable idle task deep sleep when in S0. */
+ disable_sleep(SLEEP_MASK_AP_RUN);
+ CPRINTS("already in S0");
+ return POWER_S0;
}
-
+ if ((power_get_signals() & CHIPSET_G3S5_POWERUP_SIGNAL)
+ == CHIPSET_G3S5_POWERUP_SIGNAL) {
+ /* case #2 & #3 */
+ CPRINTS("already in S5");
+ return POWER_S5;
+ }
+ /* case #4 */
+ chipset_force_g3();
return POWER_G3;
}