/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

/*
 * idle_thread_get - Return the cached idle task for @cpu.
 *
 * The task was forked up front by idle_threads_init()/idle_init(); if
 * that fork failed the slot is NULL and we report -ENOMEM via ERR_PTR.
 * init_idle() reinitializes the cached task so it can be reused for a
 * subsequent hotplug bringup of the same cpu.
 */
struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}
  30. void __init idle_thread_set_boot_cpu(void)
  31. {
  32. per_cpu(idle_threads, smp_processor_id()) = current;
  33. }
  34. /**
  35. * idle_init - Initialize the idle thread for a cpu
  36. * @cpu: The cpu for which the idle thread should be initialized
  37. *
  38. * Creates the thread if it does not exist.
  39. */
  40. static inline void idle_init(unsigned int cpu)
  41. {
  42. struct task_struct *tsk = per_cpu(idle_threads, cpu);
  43. if (!tsk) {
  44. tsk = fork_idle(cpu);
  45. if (IS_ERR(tsk))
  46. pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
  47. else
  48. per_cpu(idle_threads, cpu) = tsk;
  49. }
  50. }
  51. /**
  52. * idle_threads_init - Initialize idle threads for all cpus
  53. */
  54. void __init idle_threads_init(void)
  55. {
  56. unsigned int cpu, boot_cpu;
  57. boot_cpu = smp_processor_id();
  58. for_each_possible_cpu(cpu) {
  59. if (cpu != boot_cpu)
  60. idle_init(cpu);
  61. }
  62. }
  63. #endif
/* All registered per-cpu hotplug thread descriptors. */
static LIST_HEAD(hotplug_threads);

/* Serializes (un)registration against the cpu hotplug callbacks below. */
static DEFINE_MUTEX(smpboot_threads_lock);

/*
 * Bookkeeping handed to smpboot_thread_fn() as the kthread data pointer.
 * Allocated in __smpboot_create_thread(), freed by the thread itself on
 * stop.
 */
struct smpboot_thread_data {
	unsigned int			cpu;	/* cpu this thread is bound to */
	unsigned int			status;	/* HP_THREAD_* lifecycle state */
	struct smp_hotplug_thread	*ht;	/* the registered descriptor */
};

/* Lifecycle states of a per-cpu hotplug thread. */
enum {
	HP_THREAD_NONE = 0,	/* freshly created, setup() not yet run */
	HP_THREAD_ACTIVE,	/* setup()/unpark() done, thread serviceable */
	HP_THREAD_PARKED,	/* park() callback ran, thread is parked */
};
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data: thread data pointer (struct smpboot_thread_data, freed on exit)
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop via kthread_should_stop();
 * it never returns otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * Disable preemption so the stop/park checks and the state
		 * machine below run without being migrated off this cpu.
		 * Each callback is invoked with preemption enabled again.
		 */
		preempt_disable();
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			/* Allocated by __smpboot_create_thread() */
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* Run the park callback only once per active period */
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			/* First wakeup after creation: run the setup hook */
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		case HP_THREAD_PARKED:
			/* First wakeup after a park: run the unpark hook */
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		}

		if (!ht->thread_should_run(td->cpu)) {
			/* Nothing to do: sleep until woken up again */
			preempt_enable();
			schedule();
		} else {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
  141. static int
  142. __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
  143. {
  144. struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
  145. struct smpboot_thread_data *td;
  146. if (tsk)
  147. return 0;
  148. td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
  149. if (!td)
  150. return -ENOMEM;
  151. td->cpu = cpu;
  152. td->ht = ht;
  153. tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
  154. ht->thread_comm);
  155. if (IS_ERR(tsk)) {
  156. kfree(td);
  157. return PTR_ERR(tsk);
  158. }
  159. get_task_struct(tsk);
  160. *per_cpu_ptr(ht->store, cpu) = tsk;
  161. return 0;
  162. }
  163. int smpboot_create_threads(unsigned int cpu)
  164. {
  165. struct smp_hotplug_thread *cur;
  166. int ret = 0;
  167. mutex_lock(&smpboot_threads_lock);
  168. list_for_each_entry(cur, &hotplug_threads, list) {
  169. ret = __smpboot_create_thread(cur, cpu);
  170. if (ret)
  171. break;
  172. }
  173. mutex_unlock(&smpboot_threads_lock);
  174. return ret;
  175. }
  176. static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
  177. {
  178. struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
  179. kthread_unpark(tsk);
  180. }
  181. void smpboot_unpark_threads(unsigned int cpu)
  182. {
  183. struct smp_hotplug_thread *cur;
  184. mutex_lock(&smpboot_threads_lock);
  185. list_for_each_entry(cur, &hotplug_threads, list)
  186. smpboot_unpark_thread(cur, cpu);
  187. mutex_unlock(&smpboot_threads_lock);
  188. }
  189. static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
  190. {
  191. struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
  192. if (tsk)
  193. kthread_park(tsk);
  194. }
  195. void smpboot_park_threads(unsigned int cpu)
  196. {
  197. struct smp_hotplug_thread *cur;
  198. mutex_lock(&smpboot_threads_lock);
  199. list_for_each_entry_reverse(cur, &hotplug_threads, list)
  200. smpboot_park_thread(cur, cpu);
  201. mutex_unlock(&smpboot_threads_lock);
  202. }
  203. static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
  204. {
  205. unsigned int cpu;
  206. /* We need to destroy also the parked threads of offline cpus */
  207. for_each_possible_cpu(cpu) {
  208. struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
  209. if (tsk) {
  210. kthread_stop(tsk);
  211. put_task_struct(tsk);
  212. *per_cpu_ptr(ht->store, cpu) = NULL;
  213. }
  214. }
  215. }
  216. /**
  217. * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
  218. * @plug_thread: Hotplug thread descriptor
  219. *
  220. * Creates and starts the threads on all online cpus.
  221. */
  222. int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
  223. {
  224. unsigned int cpu;
  225. int ret = 0;
  226. mutex_lock(&smpboot_threads_lock);
  227. for_each_online_cpu(cpu) {
  228. ret = __smpboot_create_thread(plug_thread, cpu);
  229. if (ret) {
  230. smpboot_destroy_threads(plug_thread);
  231. goto out;
  232. }
  233. smpboot_unpark_thread(plug_thread, cpu);
  234. }
  235. list_add(&plug_thread->list, &hotplug_threads);
  236. out:
  237. mutex_unlock(&smpboot_threads_lock);
  238. return ret;
  239. }
  240. EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread: Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	/*
	 * Block cpu hotplug (taken before the mutex, matching the lock
	 * order of the hotplug callbacks) so no cpu comes or goes while
	 * the threads are being torn down.
	 */
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);