@@ -14,7 +14,6 @@
  * This file handles the architecture-dependent parts of process handling..
  */
 
-#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -32,12 +31,10 @@
 #include <linux/notifier.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
-#include <linux/tick.h>
 #include <linux/prctl.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/ftrace.h>
-#include <linux/cpuidle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -52,114 +49,10 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
-#include <asm/nmi.h>
 
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
-static DEFINE_PER_CPU(unsigned char, is_idle);
-
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
-void enter_idle(void)
-{
-	percpu_write(is_idle, 1);
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
-		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
-	/* idle loop has pid 0 */
-	if (current->pid)
-		return;
-	__exit_idle();
-}
-
-#ifndef CONFIG_SMP
-static inline void play_dead(void)
-{
-	BUG();
-}
-#endif
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. CPU0 already has it initialized but no harm in
-	 * doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		while (!need_resched()) {
-
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-			enter_idle();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
-
-			if (cpuidle_idle_call())
-				pm_idle();
-
-			rcu_idle_exit();
-			start_critical_timings();
-
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
-
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)