@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 	return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+static void acpi_safe_halt(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we
+	 * test NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+}
+
 #ifndef CONFIG_CPU_IDLE
 
 static void
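
The move above is mechanical, but the smp_mb() it carries is load-bearing: the
scheduler's wakeup path tests TS_POLLING to decide whether an idle CPU needs a
reschedule IPI, so the flag-clearing store must be globally visible before this
CPU samples NEED_RESCHED. A minimal user-space sketch of that ordering
requirement, using C11 atomics (all names here are invented stand-ins, not
kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool polling = true;	/* stands in for TS_POLLING */
	static atomic_bool resched_pending;	/* stands in for NEED_RESCHED */

	/* Idle side, mirroring acpi_safe_halt(): clear the polling flag,
	 * fence, then re-check for pending work before halting. */
	static void idle_enter(void)
	{
		atomic_store_explicit(&polling, false, memory_order_relaxed);

		/* smp_mb() analogue: without it the store above may become
		 * visible after the load below, and a waker that still saw
		 * polling == true (and therefore skipped the IPI) could
		 * leave this CPU halted with work pending. */
		atomic_thread_fence(memory_order_seq_cst);

		if (!atomic_load_explicit(&resched_pending, memory_order_relaxed)) {
			/* safe_halt() would sti;hlt here */
		}

		atomic_store_explicit(&polling, true, memory_order_relaxed);
	}
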
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 		return;
 }
 
-static void acpi_safe_halt(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 static atomic_t c3_cpu_count;
 
 /* Common C-state entry for C2, C3, .. */
@@ -1385,15 +1385,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
+	acpi_safe_halt();
 
 	cx->usage++;
 
@@ -1493,6 +1485,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
+	if (acpi_idle_bm_check()) {
+		if (dev->safe_state) {
+			return dev->safe_state->enter(dev, dev->safe_state);
+		} else {
+			acpi_safe_halt();
+			return 0;
+		}
+	}
+
 	local_irq_disable();
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
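
This hunk is the heart of the change: bus-master activity is now checked up
front, before interrupts are disabled and the PM timer is read, and the request
is demoted to the recorded BM-safe state (or a plain halt) rather than entering
C3 with DMA traffic in flight. The early return deliberately skips the t1/t2
residency accounting below, since no deep-state entry happened; a demoted entry
is accounted by the safe state's own enter handler. A compressed sketch of the
dispatch, with toy types standing in for the real cpuidle structures:

	struct toy_state {
		int (*enter)(struct toy_state *st);
	};

	/* Mirrors the new early exit: prefer the registered shallower
	 * state, fall back to a bare halt when none was recorded. */
	static int toy_enter_deep(struct toy_state *safe, int bm_active)
	{
		if (bm_active) {
			if (safe)
				return safe->enter(safe);
			return 0;	/* plain halt, nothing to account */
		}
		/* ... C3 entry sequence would follow here ... */
		return 1;
	}
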
@@ -1515,49 +1516,39 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	 */
 	acpi_state_timer_broadcast(pr, cx, 1);
 
-	if (acpi_idle_bm_check()) {
-		cx = pr->power.bm_state;
-
-		acpi_idle_update_bm_rld(pr, cx);
-
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		acpi_idle_do_entry(cx);
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-	} else {
-		acpi_idle_update_bm_rld(pr, cx);
+	acpi_idle_update_bm_rld(pr, cx);
 
-		/*
-		 * disable bus master
-		 * bm_check implies we need ARB_DIS
-		 * !bm_check implies we need cache flush
-		 * bm_control implies whether we can do ARB_DIS
-		 *
-		 * That leaves a case where bm_check is set and bm_control is
-		 * not set. In that case we cannot do much, we enter C3
-		 * without doing anything.
-		 */
-		if (pr->flags.bm_check && pr->flags.bm_control) {
-			spin_lock(&c3_lock);
-			c3_cpu_count++;
-			/* Disable bus master arbitration when all CPUs are in C3 */
-			if (c3_cpu_count == num_online_cpus())
-				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
-			spin_unlock(&c3_lock);
-		} else if (!pr->flags.bm_check) {
-			ACPI_FLUSH_CPU_CACHE();
-		}
+	/*
+	 * disable bus master
+	 * bm_check implies we need ARB_DIS
+	 * !bm_check implies we need cache flush
+	 * bm_control implies whether we can do ARB_DIS
+	 *
+	 * That leaves a case where bm_check is set and bm_control is
+	 * not set. In that case we cannot do much, we enter C3
+	 * without doing anything.
+	 */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
+		spin_lock(&c3_lock);
+		c3_cpu_count++;
+		/* Disable bus master arbitration when all CPUs are in C3 */
+		if (c3_cpu_count == num_online_cpus())
+			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+		spin_unlock(&c3_lock);
+	} else if (!pr->flags.bm_check) {
+		ACPI_FLUSH_CPU_CACHE();
+	}
 
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		acpi_idle_do_entry(cx);
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	acpi_idle_do_entry(cx);
+	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
-		/* Re-enable bus master arbitration */
-		if (pr->flags.bm_check && pr->flags.bm_control) {
-			spin_lock(&c3_lock);
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
-			c3_cpu_count--;
-			spin_unlock(&c3_lock);
-		}
+	/* Re-enable bus master arbitration */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
+		spin_lock(&c3_lock);
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		c3_cpu_count--;
+		spin_unlock(&c3_lock);
 	}
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
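
With the BM check hoisted out, the old else-branch simply un-indents; the logic
is otherwise unchanged. The c3_cpu_count handling is effectively a reference
count on C3 residency: arbitration is disabled only when the last online CPU
arrives in C3, and re-enabled as soon as any CPU leaves. A toy user-space model
of that pattern (pthread stand-ins invented for illustration, not the kernel
locking API):

	#include <pthread.h>

	static pthread_mutex_t c3_lock = PTHREAD_MUTEX_INITIALIZER;
	static int c3_cpu_count;
	static int arb_disabled;	/* stands in for ACPI_BITREG_ARB_DISABLE */

	static void enter_c3(int num_online_cpus)
	{
		pthread_mutex_lock(&c3_lock);
		/* Only the last CPU to reach C3 may shut off arbitration. */
		if (++c3_cpu_count == num_online_cpus)
			arb_disabled = 1;
		pthread_mutex_unlock(&c3_lock);
	}

	static void exit_c3(void)
	{
		pthread_mutex_lock(&c3_lock);
		/* First CPU out re-enables unconditionally, as above. */
		arb_disabled = 0;
		c3_cpu_count--;
		pthread_mutex_unlock(&c3_lock);
	}
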
@@ -1626,12 +1617,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		case ACPI_STATE_C1:
 			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			state->enter = acpi_idle_enter_c1;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C3:
@@ -1652,14 +1645,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	if (!count)
 		return -EINVAL;
 
-	/* find the deepest state that can handle active BM */
-	if (pr->flags.bm_check) {
-		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-			if (pr->power.states[i].type == ACPI_STATE_C3)
-				break;
-		pr->power.bm_state = &pr->power.states[i-1];
-	}
-
 	return 0;
 }
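
The removed bm_state search is superseded by the dev->safe_state assignments
two hunks up: acpi_processor_setup_cpuidle() walks the states in ascending
order, both the C1 and C2 arms record themselves, and the last writer wins, so
safe_state ends up pointing at the deepest state that never needs bus-master
arbitration (C2 when present, otherwise C1). A self-contained toy model of that
last-writer-wins selection (names invented):

	#include <stdio.h>

	enum ctype { C1 = 1, C2, C3 };

	int main(void)
	{
		enum ctype states[] = { C1, C2, C3 };
		enum ctype safe = 0;
		unsigned i;

		/* Mirror of the switch: C1 and C2 record themselves as the
		 * safe state, C3 does not, so the deepest BM-safe state
		 * survives the loop. */
		for (i = 0; i < sizeof states / sizeof states[0]; i++)
			if (states[i] == C1 || states[i] == C2)
				safe = states[i];

		printf("safe_state = C%d\n", safe);	/* prints C2 */
		return 0;
	}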