@@ -301,13 +301,7 @@ void exit_idle(void)
 }
 #endif
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	/*
 	 * If we're the non-boot CPU, nothing set the stack canary up
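
With cpu_idle() gone, the per-CPU idle entry point moves into the core.
For orientation, the generic caller (kernel/cpu/idle.c as merged for
v3.10) looks roughly like this; a sketch from memory, not a verbatim
quote:

	void cpu_startup_entry(enum cpuhp_state state)
	{
		current_set_polling();		/* the core owns the polling flag now */
		arch_cpu_idle_prepare();	/* x86 hook above: stack canary setup */
		cpu_idle_loop();		/* generic idle loop, never returns */
	}

The arch_cpu_idle_* hooks all have __weak default stubs there, so an
architecture only overrides the ones it actually needs.
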
@@ -317,71 +311,40 @@ void cpu_idle(void)
 	 * canaries already on the stack wont ever trigger).
 	 */
 	boot_init_stack_canary();
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-
-			enter_idle();
-
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
+}
 
-			if (cpuidle_idle_call())
-				x86_idle();
+void arch_cpu_idle_enter(void)
+{
+	local_touch_nmi();
+	enter_idle();
+}
 
-			rcu_idle_exit();
-			start_critical_timings();
+void arch_cpu_idle_exit(void)
+{
+	__exit_idle();
+}
 
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}
 
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+/*
+ * Called from the generic idle code.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		x86_idle();
 }
 
 /*
- * We use this if we don't have any better
- * idle routine..
+ * We use this if we don't have any better idle routine..
  */
 void default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
+	safe_halt();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
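
For reference, the generic loop that now drives all of these hooks is
roughly the following, simplified from kernel/cpu/idle.c and written
down from memory rather than quoted verbatim:

	static void cpu_idle_loop(void)
	{
		while (1) {
			tick_nohz_idle_enter();

			while (!need_resched()) {
				check_pgt_cache();
				rmb();

				if (cpu_is_offline(smp_processor_id()))
					arch_cpu_idle_dead();

				local_irq_disable();
				arch_cpu_idle_enter();

				if (cpu_idle_force_poll) {
					cpu_idle_poll();	/* idle=poll spins here */
				} else {
					current_clr_polling();
					if (!need_resched()) {
						stop_critical_timings();
						rcu_idle_enter();
						arch_cpu_idle();	/* must re-enable irqs */
						rcu_idle_exit();
						start_critical_timings();
					} else {
						local_irq_enable();
					}
					current_set_polling();
				}
				arch_cpu_idle_exit();
			}
			tick_nohz_idle_exit();
			schedule_preempt_disabled();
		}
	}

The current_clr_polling()/need_resched() recheck before arch_cpu_idle()
closes the wakeup race that default_idle() used to handle by hand with
TS_POLLING and smp_mb(), which is why the hunk above can shrink it to a
bare safe_halt().
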
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
 	halt();
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
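
poll_idle() can go because the core now provides the equivalent; from
memory, the generic version is roughly:

	static inline int cpu_idle_poll(void)
	{
		rcu_idle_enter();
		trace_cpu_idle_rcuidle(0, smp_processor_id());
		local_irq_enable();
		while (!need_resched())
			cpu_relax();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
		rcu_idle_exit();
		return 1;
	}

Same busy-wait, but with the RCU and tracing bookkeeping done in one
place instead of per architecture.
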
@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (x86_idle == poll_idle && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
-	if (x86_idle)
+	if (x86_idle || boot_option_idle_override == IDLE_POLL)
 		return;
 
 	if (cpu_has_amd_erratum(amd_erratum_400)) {
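
With poll_idle() gone, x86_idle can no longer encode the poll case, so
select_idle_routine() above tests boot_option_idle_override instead,
and the hunk below has idle_setup() hand poll mode to the core via
cpu_idle_poll_ctrl(true). That helper is just a counter on the
force-poll switch; roughly, again from memory:

	void cpu_idle_poll_ctrl(bool enable)
	{
		if (enable) {
			cpu_idle_force_poll++;
		} else {
			cpu_idle_force_poll--;
			WARN_ON_ONCE(cpu_idle_force_poll < 0);
		}
	}
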
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
+		cpu_idle_poll_ctrl(true);
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is