
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns non-zero if it was successfully added.
 *
 * We queue the work on the CPU it was submitted from, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

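/*
 * Example (illustrative sketch, not part of this file): queueing a work
 * item on a private workqueue with the three-argument work_struct API used
 * throughout this file.  The my_* identifiers and the "my_events" queue
 * name are hypothetical.
 *
 *	static void my_work_fn(void *data)
 *	{
 *		pr_debug("my_work_fn ran, data=%p\n", data);
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn, NULL);
 *	static struct workqueue_struct *my_wq;
 *
 *	In the driver's init path:
 *
 *	my_wq = create_workqueue("my_events");
 *	if (my_wq)
 *		queue_work(my_wq, &my_work);
 */
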
static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns non-zero if it was successfully added.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

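/*
 * Example (illustrative sketch): deferring the hypothetical my_work item
 * from the previous example by roughly one second.  The call arms
 * work->timer, and delayed_work_timer_fn() does the actual queueing once
 * @delay jiffies have elapsed, so the item must not already be pending.
 *
 *	queue_delayed_work(my_wq, &my_work, HZ);
 */
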
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns non-zero if it was successfully added.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

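/*
 * Example (illustrative sketch): targeting a specific CPU.  add_timer_on()
 * fires the timer on CPU 0, so the work lands on CPU 0's per-CPU queue
 * after the delay; which CPU actually runs it is still subject to hotplug,
 * as with queue_work().
 *
 *	queue_delayed_work_on(0, my_wq, &my_work, 2 * HZ);
 */
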
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

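/*
 * Example (illustrative sketch): the usual teardown ordering for the
 * hypothetical my_wq above - first make sure nothing can re-queue work,
 * then flush so every already-queued item has finished before state is
 * torn down.
 *
 *	...stop everything that still calls queue_work(my_wq, ...)...
 *	flush_workqueue(my_wq);
 */
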
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

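/*
 * Example (illustrative sketch): full lifecycle of a private workqueue,
 * using the create_workqueue()/create_singlethread_workqueue() wrappers
 * from <linux/workqueue.h>, which expand to __create_workqueue().  Reuses
 * the hypothetical my_work item from the earlier example.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("my_single");
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	...
 *	On teardown; destroy_workqueue() flushes pending work first:
 *	destroy_workqueue(wq);
 */
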
static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

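/*
 * Example (illustrative sketch): the classic use of the shared keventd
 * queue - deferring work from an interrupt handler into process context.
 * my_deferred_fn() and my_irq_handler() are hypothetical; the handler
 * signature matches the pre-2.6.19 pt_regs-taking style used in this era.
 *
 *	static void my_deferred_fn(void *data)
 *	{
 *		...runs in keventd's process context, may sleep...
 *	}
 *	static DECLARE_WORK(my_deferred, my_deferred_fn, NULL);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id,
 *					  struct pt_regs *regs)
 *	{
 *		schedule_work(&my_deferred);
 *		return IRQ_HANDLED;
 *	}
 */
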
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

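/*
 * Example (illustrative sketch): running a hypothetical per-CPU cache
 * drain on every online CPU and waiting for all of them to finish before
 * returning.
 *
 *	static void drain_local_cache(void *unused)
 *	{
 *		...operate on this CPU's per-CPU data...
 *	}
 *
 *	int err = schedule_on_each_cpu(drain_local_cache, NULL);
 */
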
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct work_struct *work)
{
	while (!cancel_delayed_work(work))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

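/*
 * Example (illustrative sketch): a self-rearming poll handler and how to
 * stop it reliably.  A single cancel_delayed_work() can lose the race
 * against a handler instance that is already re-queueing itself, hence
 * the cancel-then-flush loop above.  All my_poll_* names are hypothetical.
 *
 *	static void my_poll_fn(void *data);
 *	static DECLARE_WORK(my_poll_work, my_poll_fn, NULL);
 *
 *	static void my_poll_fn(void *data)
 *	{
 *		...do the periodic work...
 *		queue_delayed_work(my_wq, &my_poll_work, HZ);
 *	}
 *
 *	On teardown:
 *	cancel_rearming_delayed_workqueue(my_wq, &my_poll_work);
 */
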
/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
	cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

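/*
 * Example (illustrative sketch): releasing a resource that needs process
 * context from a caller that may be in interrupt context.  struct
 * my_object and its ew member are hypothetical; the execute_work storage
 * lives inside the object so it is still valid if the call is deferred.
 *
 *	static void my_release(void *data)
 *	{
 *		struct my_object *obj = data;
 *		...may sleep while freeing obj's resources...
 *	}
 *
 *	execute_in_process_context(my_release, obj, &obj->ew);
 */
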
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}
	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}