process.c

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/idle.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/ftrace.h>
#include <asm/system.h>
#include <asm/apic.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;
        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                WARN_ON((unsigned long)dst->thread.xstate & 15);
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }
        return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}
void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
        return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
        return 1;
}
#endif
/*
 * We use this if we don't have any better
 * idle routine.
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                struct power_trace it;

                trace_power_start(&it, POWER_CSTATE, 1);
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end(&it);
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
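
/*
 * Illustrative sketch (not part of the original file): the smp_mb() in
 * default_idle() pairs with the waker's side. A remote CPU that sets
 * TIF_NEED_RESCHED only skips the reschedule IPI when it still sees
 * TS_POLLING set, roughly along the lines of the scheduler's resched path:
 *
 *      set_tsk_need_resched(p);
 *      smp_mb();
 *      if (!tsk_is_polling(p))
 *              smp_send_reschedule(cpu);
 */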
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}
/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
 * pm_idle and pick up the new pm_idle value. Required while changing the
 * pm_idle handler on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call. The
 * old pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
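
/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to switch the idle handler per the comment above. The new pointer
 * must be published before cpu_idle_wait() so no CPU keeps running the old
 * routine; "my_new_idle" is a hypothetical handler used only for illustration.
 *
 *      pm_idle = my_new_idle;
 *      cpu_idle_wait();
 */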
/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI used to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we are woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, (ax>>4)+1);
        if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __mwait(ax, cx);
        }
        trace_power_end(&it);
}
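
/*
 * Illustrative sketch (not part of the original file): a deeper-C-state
 * driver can pass an MWAIT hint in EAX, where bits 7:4 select the target
 * C-state (the tracepoint above logs it as (ax >> 4) + 1, so 0 means C1)
 * and bits 3:0 a sub-state. A hypothetical caller requesting C2 would do:
 *
 *      mwait_idle_with_hints(0x10, 0);
 */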
/* Default MONITOR/MWAIT with no hints, used for the default C1 state */
static void mwait_idle(void)
{
        struct power_trace it;

        if (!need_resched()) {
                trace_power_start(&it, POWER_CSTATE, 1);
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end(&it);
        } else
                local_irq_enable();
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        struct power_trace it;

        trace_power_start(&it, POWER_CSTATE, 0);
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end(&it);
}
/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Power savings
 * then depend on a clock divisor and the current P-state of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (force_mwait)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * edx enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}
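
/*
 * Illustrative sketch (not part of the original file): CPUID leaf 0x05
 * enumerates MWAIT sub C-states in EDX, four bits per C-state (bits 3:0
 * for C0, 7:4 for C1, 11:8 for C2, ...), which is why MWAIT_EDX_C1 masks
 * bits 7:4 above. Decoding the C1 sub-state count would look like:
 *
 *      unsigned int c1_substates = (edx & MWAIT_EDX_C1) >> 4;
 */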
/*
 * Check for AMD CPUs which potentially have C1E support
 */
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_AMD)
                return 0;

        if (c->x86 < 0x0F)
                return 0;

        /* Family 0x0f models < rev F do not have C1E */
        if (c->x86 == 0x0f && c->x86_model < 0x40)
                return 0;

        return 1;
}

static cpumask_t c1e_mask = CPU_MASK_NONE;
static int c1e_detected;

void c1e_remove_cpu(int cpu)
{
        cpu_clear(cpu, c1e_mask);
}
/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local APIC timer and TSC stop).
 */
static void c1e_idle(void)
{
        if (need_resched())
                return;

        if (!c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        c1e_detected = 1;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
                }
        }

        if (c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpu_isset(cpu, c1e_mask)) {
                        cpu_set(cpu, c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere. Needs
                         * to run with interrupts enabled as it uses
                         * smp_call_function.
                         */
                        local_irq_enable();
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                        local_irq_disable();
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (check_c1e_idle(c)) {
                printk(KERN_INFO "using C1E aware idle routine\n");
                pm_idle = c1e_idle;
        } else
                pm_idle = default_idle;
}
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is given, HLT is forced
                 * to be used for CPU idle. In that case the CPU C2/C3
                 * states won't be used again.
                 * To continue to load the CPU idle driver,
                 * boot_option_idle_override is left untouched.
                 */
                pm_idle = default_idle;
                idle_halt = 1;
                return 0;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * When the boot option idle=nomwait is given, mwait is
                 * disabled for the CPU C2/C3 states. In that case
                 * boot_option_idle_override is left untouched as well.
                 */
                idle_nomwait = 1;
                return 0;
        } else
                return -1;

        boot_option_idle_override = 1;
        return 0;
}
early_param("idle", idle_setup);
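
/*
 * Illustrative usage (not part of the original file): the "idle" options
 * parsed above are given on the kernel command line, e.g.
 *
 *      idle=poll       busy-wait in poll_idle (lowest wakeup latency,
 *                      highest power use)
 *      idle=mwait      force MWAIT even where mwait_usable() would refuse it
 *      idle=halt       always use HLT via default_idle
 *      idle=nomwait    do not use MWAIT for the CPU C2/C3 states
 */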