processor_idle.c

/*
 * processor_idle - idle state cpuidle driver.
 * Adapted from drivers/idle/intel_idle.c and
 * drivers/acpi/processor_idle.c
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>

#include "plpar_wrappers.h"
#include "pseries.h"
struct cpuidle_driver pseries_idle_driver = {
        .name  = "pseries_idle",
        .owner = THIS_MODULE,
};

#define MAX_IDLE_STATE_COUNT    2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;
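
/*
 * Each idle entry is bracketed by a PURR snapshot so that the cycles
 * spent idle can be charged to lppaca->wait_state_cycles, and the
 * lppaca->idle flag tells the hypervisor this thread has no work to do.
 */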
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
        *kt_before = ktime_get();
        *in_purr = mfspr(SPRN_PURR);
        /*
         * Indicate to the HV that we are idle. Now would be
         * a good time to find other work to dispatch.
         */
        get_lppaca()->idle = 1;
}
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
        get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
        get_lppaca()->idle = 0;

        return ktime_to_us(ktime_sub(ktime_get(), kt_before));
}
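
/*
 * Snooze: spin at the lowest SMT thread priority (HMT_low/HMT_very_low)
 * so sibling threads receive most of the core's resources, until a
 * reschedule is needed.  TIF_POLLING_NRFLAG lets the scheduler skip
 * the resched IPI while this cpu is polling.
 */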
static int snooze_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        unsigned long in_purr;
        ktime_t kt_before;
        int cpu = dev->cpu;

        idle_loop_prolog(&in_purr, &kt_before);
        local_irq_enable();
        set_thread_flag(TIF_POLLING_NRFLAG);

        while ((!need_resched()) && cpu_online(cpu)) {
                ppc64_runlatch_off();
                HMT_low();
                HMT_very_low();
        }

        HMT_medium();
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb();

        dev->last_residency =
                (int)idle_loop_epilog(in_purr, kt_before);
        return index;
}
static void check_and_cede_processor(void)
{
        /*
         * Ensure our interrupt state is properly tracked, and check
         * whether an interrupt arrived while we were soft-disabled;
         * only cede if it is safe to do so.
         */
        if (prep_irq_for_idle()) {
                cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
                /* Ensure that H_CEDE returns with IRQs on */
                if (WARN_ON(!(mfmsr() & MSR_EE)))
                        __hard_irq_enable();
#endif
        }
}
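
/*
 * Dedicated partition: mark this cpu's cycles as donatable to the
 * hypervisor (donate_dedicated_cpu) and cede the processor until an
 * interrupt or a prod from another cpu wakes us.
 */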
static int dedicated_cede_loop(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv,
                                int index)
{
        unsigned long in_purr;
        ktime_t kt_before;

        idle_loop_prolog(&in_purr, &kt_before);
        get_lppaca()->donate_dedicated_cpu = 1;

        ppc64_runlatch_off();
        HMT_medium();
        check_and_cede_processor();

        get_lppaca()->donate_dedicated_cpu = 0;
        dev->last_residency =
                (int)idle_loop_epilog(in_purr, kt_before);
        return index;
}
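
/* Shared partition: cede the virtual processor back to the shared pool. */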
static int shared_cede_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        unsigned long in_purr;
        ktime_t kt_before;

        idle_loop_prolog(&in_purr, &kt_before);

        /*
         * Yield the processor to the hypervisor. We return if
         * an external interrupt occurs (which are driven prior
         * to returning here) or if a prod occurs from another
         * processor. When returning here, external interrupts
         * are enabled.
         */
        check_and_cede_processor();

        dev->last_residency =
                (int)idle_loop_epilog(in_purr, kt_before);
        return index;
}
/*
 * States for dedicated partition case.
 * exit_latency and target_residency are in microseconds.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
        { /* Snooze */
                .name = "snooze",
                .desc = "snooze",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &snooze_loop },
        { /* CEDE */
                .name = "CEDE",
                .desc = "CEDE",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 10,
                .target_residency = 100,
                .enter = &dedicated_cede_loop },
};
/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
        { /* Shared Cede */
                .name = "Shared Cede",
                .desc = "Shared Cede",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &shared_cede_loop },
};
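
/*
 * Called when the per-cpu smt_snooze_delay setting changes (e.g. via
 * sysfs).  Only meaningful for the dedicated state table, where state
 * 1 is CEDE; a negative residency disables CEDE on that cpu.
 */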
void update_smt_snooze_delay(int cpu, int residency)
{
        struct cpuidle_driver *drv = cpuidle_get_driver();
        struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

        if (cpuidle_state_table != dedicated_states)
                return;

        if (residency < 0) {
                /* Disable the CEDE state on that cpu */
                if (dev)
                        dev->states_usage[1].disable = 1;
        } else if (drv)
                drv->states[1].target_residency = residency;
}
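
/*
 * Hotplug notifier: enable the per-cpu cpuidle device when a cpu comes
 * online and disable it when the cpu dies, so a dead cpu never has an
 * active cpuidle device.
 */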
static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
                        unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct cpuidle_device *dev =
                        per_cpu_ptr(pseries_cpuidle_devices, hotcpu);

        if (dev && cpuidle_get_driver()) {
                switch (action) {
                case CPU_ONLINE:
                case CPU_ONLINE_FROZEN:
                        cpuidle_pause_and_lock();
                        cpuidle_enable_device(dev);
                        cpuidle_resume_and_unlock();
                        break;

                case CPU_DEAD:
                case CPU_DEAD_FROZEN:
                        cpuidle_pause_and_lock();
                        cpuidle_disable_device(dev);
                        cpuidle_resume_and_unlock();
                        break;

                default:
                        return NOTIFY_DONE;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
        .notifier_call = pseries_cpuidle_add_cpu_notifier,
};
/*
 * pseries_cpuidle_driver_init()
 * Copy the enabled states from the chosen state table into the
 * driver, stopping at max_idle_state.
 */
static int pseries_cpuidle_driver_init(void)
{
        int idle_state;
        struct cpuidle_driver *drv = &pseries_idle_driver;

        drv->state_count = 0;

        for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {

                if (idle_state > max_idle_state)
                        break;

                /* is the state not enabled? */
                if (cpuidle_state_table[idle_state].enter == NULL)
                        continue;

                drv->states[drv->state_count] = /* structure copy */
                        cpuidle_state_table[idle_state];

                drv->state_count += 1;
        }

        return 0;
}
/*
 * pseries_idle_devices_uninit(void)
 * unregister cpuidle devices and de-allocate memory
 */
static void pseries_idle_devices_uninit(void)
{
        int i;
        struct cpuidle_device *dev;

        for_each_possible_cpu(i) {
                dev = per_cpu_ptr(pseries_cpuidle_devices, i);
                cpuidle_unregister_device(dev);
        }

        free_percpu(pseries_cpuidle_devices);
}
/*
 * pseries_idle_devices_init()
 * allocate, initialize and register cpuidle device
 */
static int pseries_idle_devices_init(void)
{
        int i;
        struct cpuidle_driver *drv = &pseries_idle_driver;
        struct cpuidle_device *dev;

        pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
        if (pseries_cpuidle_devices == NULL)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                dev = per_cpu_ptr(pseries_cpuidle_devices, i);
                dev->state_count = drv->state_count;
                dev->cpu = i;
                if (cpuidle_register_device(dev)) {
                        printk(KERN_DEBUG
                                "cpuidle_register_device %d failed!\n", i);
                        return -EIO;
                }
        }

        return 0;
}
/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return -ENODEV;

        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                return -ENODEV;

        if (max_idle_state == 0) {
                printk(KERN_DEBUG "pseries processor idle disabled.\n");
                return -EPERM;
        }

        if (get_lppaca()->shared_proc)
                cpuidle_state_table = shared_states;
        else
                cpuidle_state_table = dedicated_states;

        return 0;
}
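
/*
 * Registration order matters: the driver is registered before the
 * per-cpu devices, and the hotplug notifier last; teardown (both on
 * failure here and in module exit) runs in the reverse order.
 */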
static int __init pseries_processor_idle_init(void)
{
        int retval;

        retval = pseries_idle_probe();
        if (retval)
                return retval;

        pseries_cpuidle_driver_init();
        retval = cpuidle_register_driver(&pseries_idle_driver);
        if (retval) {
                printk(KERN_DEBUG "Registration of pseries driver failed.\n");
                return retval;
        }

        retval = pseries_idle_devices_init();
        if (retval) {
                pseries_idle_devices_uninit();
                cpuidle_unregister_driver(&pseries_idle_driver);
                return retval;
        }

        register_cpu_notifier(&setup_hotplug_notifier);
        printk(KERN_DEBUG "pseries_idle_driver registered\n");

        return 0;
}
static void __exit pseries_processor_idle_exit(void)
{
        unregister_cpu_notifier(&setup_hotplug_notifier);
        pseries_idle_devices_uninit();
        cpuidle_unregister_driver(&pseries_idle_driver);
}
module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);

MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");
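
/*
 * Note: once loaded, the registered states can be inspected through the
 * generic cpuidle sysfs interface, i.e. the name, desc and latency files
 * under /sys/devices/system/cpu/cpuN/cpuidle/stateN/.
 */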