@@ -111,12 +111,10 @@ void default_idle(void)
 		 */
 		smp_mb();
 
-		local_irq_disable();
-		if (!need_resched()) {
+		if (!need_resched())
 			safe_halt();	/* enables interrupts racelessly */
-			local_irq_disable();
-		}
-		local_irq_enable();
+		else
+			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
 	} else {
 		local_irq_enable();
@@ -128,17 +126,6 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	local_irq_enable();
-	cpu_relax();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <asm/nmi.h>
 /* We don't actually take CPU down, just spin without interrupts. */
@@ -196,6 +183,7 @@ void cpu_idle(void)
 			if (cpu_is_offline(cpu))
 				play_dead();
 
+			local_irq_disable();
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
 			idle();
 		}
@@ -206,104 +194,6 @@ void cpu_idle(void)
 	}
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-	smp_mb();
-	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-	if (!need_resched()) {
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__sti_mwait(ax, cx);
-		else
-			local_irq_enable();
-	} else
-		local_irq_enable();
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-	local_irq_enable();
-	mwait_idle_with_hints(0, 0);
-}
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-	if (force_mwait)
-		return 1;
-	/* Any C1 states supported? */
-	return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
-	static int selected;
-
-	if (selected)
-		return;
-#ifdef CONFIG_X86_SMP
-	if (pm_idle == poll_idle && smp_num_siblings > 1) {
-		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-			" performance may degrade.\n");
-	}
-#endif
-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-		/*
-		 * Skip, if setup has overridden idle.
-		 * One CPU supports mwait => All CPUs supports mwait
-		 */
-		if (!pm_idle) {
-			printk(KERN_INFO "using mwait in idle threads.\n");
-			pm_idle = mwait_idle;
-		}
-	}
-	selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
-	if (!strcmp(str, "poll")) {
-		printk("using polling idle threads.\n");
-		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else
-		return -1;
-
-	boot_option_idle_override = 1;
-	return 0;
-}
-early_param("idle", idle_setup);
-
 void __show_registers(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;