/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {
        spinlock_t lock;
        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;
        struct workqueue_struct *wq;
        struct task_struct *thread;
        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_single_threaded(wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                               struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, get_cpu()), work);
                put_cpu();
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

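/*
 * Timer callback for delayed work: when the timer fires, requeue
 * dwork->work on the workqueue that was recorded in its data word by
 * queue_delayed_work_on(), using the CPU the timer runs on.
 */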
static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                       struct delayed_work *dwork, unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                          struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(&dwork->timer);
        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

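/*
 * Drain cwq->worklist: pop work items one at a time and run their
 * callbacks with cwq->lock dropped, so the callbacks may sleep and may
 * queue further work. run_depth catches callers that re-enter this
 * function recursively, e.g. a work function flushing its own workqueue.
 */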
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __func__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct
                 * from inside the function that is called from it,
                 * this we need to take into account for lockdep too.
                 * To avoid bogus "held lock freed" warnings as well
                 * as problems when looking into work->lockdep_map,
                 * make a copy and use that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
                lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
                f(work);
                lock_release(&lockdep_map, 1, _THIS_IP_);
                lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR " last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}

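/*
 * Per-cwq worker thread: sleep on more_work until there is something on
 * the worklist (or the thread is asked to stop or freeze), then drain the
 * list via run_workqueue().
 */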
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->freezeable)
                set_freezable();

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

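/*
 * A wq_barrier is a dummy work item whose only job is to complete
 * barr->done when it runs. Flushing inserts one behind the work that is
 * currently queued or running and waits for the completion, which
 * guarantees that everything queued before the barrier has finished.
 */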
struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}

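/*
 * Flush one per-CPU workqueue. If called from that workqueue's own
 * thread, run the pending work directly to avoid deadlock; otherwise
 * queue a barrier and wait for it. Returns non-zero if there was
 * anything to flush.
 */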
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active;

        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
                active = 1;
        } else {
                struct wq_barrier barr;

                active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }

        return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&wq->lockdep_map, 1, _THIS_IP_);
        for_each_cpu_mask(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

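/*
 * If @work is currently being executed by this cwq's thread, queue a
 * barrier right behind it and wait for that barrier to complete.
 */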
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                             struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

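/*
 * Wait until @work is no longer running on any CPU of the workqueue it
 * was last queued on.
 */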
static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const cpumask_t *cpu_map;
        int cpu;

        might_sleep();

        lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&work->lockdep_map, 1, _THIS_IP_);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu_mask(cpu, *cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

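/*
 * Common helper for cancel_work_sync() and cancel_delayed_work_sync():
 * kill the timer (if any), take ownership of the pending bit or steal
 * the work off its worklist, then wait for a running callback to finish.
 * The loop retries while try_to_grab_pending() reports that queueing is
 * still in progress.
 */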
static int __cancel_work_timer(struct work_struct *work,
                               struct timer_list* timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        work_clear_pending(work);
        return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                          unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                             struct delayed_work *dwork, unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        flush_workqueue(keventd_wq);
        put_online_cpus();
        free_percpu(works);
        return 0;
}

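/*
 * flush_scheduled_work - wait until everything queued on the global
 * keventd workqueue (via schedule_work() and friends) has completed.
 */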
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *      be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

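/*
 * Back end used by the create_workqueue() family of helpers: allocate the
 * workqueue and its per-CPU cwqs, then create and start one worker thread
 * (single-threaded case) or one thread per online CPU.
 */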
struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                get_online_cpus();
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                put_online_cpus();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_DEAD,
         * get_online_cpus() protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        get_online_cpus();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu_mask(cpu, *cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        put_online_cpus();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

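/*
 * CPU hotplug notifier: create, start, or tear down the per-CPU worker
 * thread of every registered multi-threaded workqueue as CPUs come and
 * go, and keep cpu_populated_map in sync. Note that CPU_UP_CANCELED
 * deliberately falls through to CPU_DEAD to clean up the thread that
 * CPU_UP_PREPARE created.
 */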
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_DEAD:
                cpu_clear(cpu, cpu_populated_map);
        }

        return NOTIFY_OK;
}

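/*
 * Boot-time setup: record the initially online CPUs, pick the CPU used by
 * single-threaded workqueues, register the hotplug notifier, and create
 * the global keventd workqueue ("events").
 */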
void __init init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}