@@ -84,11 +84,15 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 void (*pm_restart)(const char *cmd);
 EXPORT_SYMBOL_GPL(pm_restart);
 
+void arch_cpu_idle_prepare(void)
+{
+	local_fiq_enable();
+}
 
 /*
  * This is our default idle handler.
  */
-static void default_idle(void)
+void arch_cpu_idle(void)
 {
 	/*
 	 * This should do all the clock switching and wait for interrupt
@@ -98,43 +102,6 @@ static void default_idle(void)
 	local_irq_enable();
 }
 
-/*
- * The idle thread.
- * We always respect 'hlt_counter' to prevent low power idle.
- */
-void cpu_idle(void)
-{
-	local_fiq_enable();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-		while (!need_resched()) {
-			/*
-			 * We need to disable interrupts here to ensure
-			 * we don't miss a wakeup call.
-			 */
-			local_irq_disable();
-			if (!need_resched()) {
-				stop_critical_timings();
-				default_idle();
-				start_critical_timings();
-				/*
-				 * default_idle functions should always return
-				 * with IRQs enabled.
-				 */
-				WARN_ON(irqs_disabled());
-			} else {
-				local_irq_enable();
-			}
-		}
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
 void machine_shutdown(void)
 {
 #ifdef CONFIG_SMP