process.c

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <trace/power.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;

DEFINE_TRACE(power_start);
DEFINE_TRACE(power_end);

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }

        ds_exit_thread(current);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_64
        if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
                clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
                if (test_tsk_thread_flag(tsk, TIF_IA32)) {
                        clear_tsk_thread_flag(tsk, TIF_IA32);
                } else {
                        set_tsk_thread_flag(tsk, TIF_IA32);
                        current_thread_info()->status |= TS_COMPAT;
                }
        }
#endif

        clear_tsk_thread_flag(tsk, TIF_DEBUG);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
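
/*
 * Note: get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC
 * prctls. A minimal user-space sketch (illustrative only, not part of
 * this file):
 *
 *      #include <sys/prctl.h>
 *
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);  // RDTSC now raises SIGSEGV
 *      ...
 *      prctl(PR_SET_TSC, PR_TSC_ENABLE);   // RDTSC allowed again
 */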

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
            test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
                ds_switch_to(prev_p, next_p);
        else if (next->debugctlmsr != prev->debugctlmsr)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
}

int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}
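
/*
 * For reference, the flag combination above means: share the address
 * space (CLONE_VM), block the parent until the child execs or exits
 * (CLONE_VFORK), and raise SIGCHLD on exit - i.e. the semantics a raw
 * user-space clone(CLONE_VFORK | CLONE_VM | SIGCHLD, ...) call would
 * request, which the comment above explains cannot be open-coded
 * safely from user mode.
 */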

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;

void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                struct power_trace it;

                trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old
 * value of pm_idle and pick up the new one. Required when changing
 * the pm_idle handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this
 * function returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
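
/*
 * A minimal sketch of the protocol documented above (my_idle and
 * install_my_idle are hypothetical names, illustration only):
 *
 *      static void my_idle(void)
 *      {
 *              safe_halt();
 *      }
 *
 *      void install_my_idle(void)
 *      {
 *              pm_idle = my_idle;   // publish the new handler first
 *              cpu_idle_wait();     // old handler unused after this
 *      }
 */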

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, (ax >> 4) + 1);
        if (!need_resched()) {
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(&it);
}
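
/*
 * The EAX hint encodes the target C-state in bits 7:4 and the
 * sub-state in bits 3:0, which is why the trace above reports
 * (ax >> 4) + 1. A sketch of a typical call (cstate_hint is a
 * hypothetical value; real callers such as the ACPI C-state code
 * derive it from firmware tables):
 *
 *      mwait_idle_with_hints(cstate_hint, 1);  // ECX bit 0: wake on irq
 */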

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        struct power_trace it;

        if (!need_resched()) {
                trace_power_start(&it, POWER_CSTATE, 1);
                if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(&it);
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(&it);
}

/*
 * mwait selection logic:
 *
 * Whether to use mwait depends on the CPU. For AMD CPUs that support
 * MWAIT it is wrong: family 0x10 and 0x11 CPUs will enter C1 on HLT,
 * and power savings then depend on a clock divisor and the current
 * P-state of the core. If all cores of a processor are in halt state
 * (C1) the processor can enter the C1E (C1 enhanced) state. If mwait
 * is used this will never happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether ECX reports valid extended (EDX) MWAIT info */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * EDX enumerates the MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}
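
/*
 * For reference: CPUID.05H:EDX enumerates the number of MWAIT
 * sub-states per C-state in 4-bit fields - bits 3:0 for C0, bits 7:4
 * for C1, and so on. MWAIT_EDX_C1 (0xf0) above therefore tests
 * whether any C1 sub-state is reachable via MWAIT.
 */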

/*
 * Check for AMD CPUs, which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0F)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}

static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        cpu_clear(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpu_isset(cpu, c1e_mask)) {
                        cpu_set(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         * Needs to run with interrupts enabled as it
                         * uses smp_call_function().
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                       " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is given, halt is
                 * forced for CPU idle. In that case the CPU C2/C3
                 * states won't be entered again.
                 * To continue to load the CPU idle driver, don't
                 * touch boot_option_idle_override.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * When the boot option idle=nomwait is given, mwait
                 * is disabled for the CPU C2/C3 states. In that case
                 * boot_option_idle_override is left untouched.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
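
/*
 * Summary of the "idle=" boot options parsed above (descriptions are
 * a brief paraphrase of the handling in idle_setup()):
 *
 *      idle=poll    - busy-wait on need_resched: lowest wakeup
 *                     latency, highest power draw
 *      idle=mwait   - force MWAIT even where the AMD C1E logic would
 *                     avoid it
 *      idle=halt    - always use HLT; C2/C3 states are not entered
 *      idle=nomwait - never use MWAIT for C-state entry
 */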