process.c

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;
                fpu_copy(dst, src);
        }
        return 0;
}
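
/*
 * Release the extended FPU state buffer attached to @tsk, if one was
 * allocated.
 */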
void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
        free_thread_xstate(tsk);
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        drop_fpu(me);
}
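
/*
 * Reset thread state on exec: drop inherited hardware breakpoints, clear the
 * TLS slots and discard the FPU state carried over from the parent.
 */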
void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        drop_init_fpu(tsk);
        /*
         * Free the FPU state for non xsave platforms. They get reallocated
         * lazily at the first use.
         */
        if (!use_eager_fpu())
                free_thread_xstate(tsk);
}

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}
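
/*
 * prctl(PR_GET_TSC) / prctl(PR_SET_TSC) back ends: report or change whether
 * RDTSC is allowed (PR_TSC_ENABLE) or raises SIGSEGV (PR_TSC_SIGSEGV) for
 * the current task.
 */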
int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
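
/*
 * Handle the "extra" context-switch work for state that differs between the
 * outgoing and incoming task: block-stepping (DEBUGCTLMSR_BTF), TIF_NOTSC
 * and the I/O permission bitmap in the per-CPU TSS, plus user-return
 * notifier propagation.
 */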
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
        this_cpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}
#endif

void arch_cpu_idle_prepare(void)
{
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. CPU0 already has it initialized but no harm in
         * doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
}

void arch_cpu_idle_enter(void)
{
        local_touch_nmi();
        enter_idle();
}

void arch_cpu_idle_exit(void)
{
        __exit_idle();
}

void arch_cpu_idle_dead(void)
{
        play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
        if (cpuidle_idle_call())
                x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        safe_halt();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
        bool ret = !!x86_idle;

        x86_idle = default_idle;

        return ret;
}
#endif
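
/*
 * Take the calling CPU out of service: mark it offline, disable its local
 * APIC and spin in HLT with interrupts off.
 */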
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;)
                halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
        if (amd_e400_c1e_mask != NULL)
                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
        if (need_resched())
                return;

        if (!amd_e400_c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        amd_e400_c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        pr_info("System has AMD C1E enabled\n");
                }
        }

        if (amd_e400_c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        pr_info("Switch to broadcast mode on CPU%d\n", cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
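
/*
 * Pick the idle routine: honour any "idle=" override, use the E400-aware
 * routine on affected AMD CPUs, otherwise fall back to plain HLT.
 */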
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
                pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
        if (x86_idle || boot_option_idle_override == IDLE_POLL)
                return;

        if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                pr_info("using AMD E400 aware idle routine\n");
                x86_idle = amd_e400_idle;
        } else
                x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
        if (x86_idle == amd_e400_idle)
                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads\n");
                boot_option_idle_override = IDLE_POLL;
                cpu_idle_poll_ctrl(true);
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                x86_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);
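
/*
 * Stack and heap randomization helpers: nudge the initial stack pointer by
 * up to 8 KB (keeping 16-byte alignment) and pick a randomized brk base
 * within 32 MB above mm->brk.
 */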
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}