/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

/*
 * The per-CPU workqueue (if single thread, we always use cpu 0's).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        task_t *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
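
/*
 * For example: a flusher that samples insert_sequence == 7 sleeps until
 * remove_sequence reaches 7.  Works queued after the sample only push
 * insert_sequence to 8, 9, ...; they never move the flusher's goal, so
 * the flusher cannot be livelocked by them.
 */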

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue.  Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = any_online_cpu(cpu_online_map);
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
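
/*
 * Example (editor's sketch, not part of the original file; my_dev, my_wq,
 * my_handler and the stats field are made-up names).  Handlers in this
 * API generation take a single void * argument and run in process
 * context:
 *
 *      static void my_handler(void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              dev->stats++;
 *      }
 *
 *      INIT_WORK(&dev->work, my_handler, dev);
 *      queue_work(my_wq, &dev->work);
 *
 * queue_work() returns 1 if the work was newly queued, 0 if it was
 * already pending.
 */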

static void delayed_work_timer_fn(unsigned long __data)
{
        struct work_struct *work = (struct work_struct *)__data;
        struct workqueue_struct *wq = work->wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = any_online_cpu(cpu_online_map);

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

int fastcall queue_delayed_work(struct workqueue_struct *wq,
                                struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
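
/*
 * Example (editor's sketch, not part of the original file; names are made
 * up).  The delay is in jiffies; the timer queues the work when it
 * expires, so one call arms at most one execution:
 *
 *      INIT_WORK(&dev->poll_work, my_poll, dev);
 *      queue_delayed_work(my_wq, &dev->poll_work, HZ / 2);
 *
 * A still-pending delayed work can be cancelled with
 * cancel_delayed_work(&dev->poll_work) before the timer fires.
 */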

static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
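
                /*
                 * Clear pending before calling the handler, so the work
                 * can be requeued (even by the handler itself) while it
                 * is running.
                 */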
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;
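
                /*
                 * Editor's note: the signed difference below keeps the
                 * comparison correct even if the sequence counters wrap,
                 * the same idiom time_after() uses for jiffies.
                 */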
                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}

/*
 * flush_workqueue - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq,
                                any_online_cpu(cpu_online_map)));
        } else {
                int cpu;

                lock_cpu_hotplug();
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                unlock_cpu_hotplug();
        }
}
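
/*
 * Example (editor's sketch, not part of the original file; names are made
 * up): a typical driver teardown makes sure no handler can still touch
 * the hardware before freeing it:
 *
 *      static void my_remove(struct my_dev *dev)
 *      {
 *              dev->shutting_down = 1;
 *              cancel_delayed_work(&dev->poll_work);
 *              flush_workqueue(my_wq);
 *              destroy_workqueue(my_wq);
 *      }
 *
 * flush_workqueue() may sleep (note the might_sleep() above), so it must
 * not be called from atomic context.
 */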

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        unlock_cpu_hotplug();

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
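
/*
 * Editor's note: callers normally use the wrapper macros from
 * <linux/workqueue.h> instead of calling __create_workqueue() directly:
 *
 *      struct workqueue_struct *my_wq;
 *
 *      my_wq = create_workqueue("mydrv");
 *      my_wq = create_singlethread_workqueue("mydrv");
 *
 * Both return NULL on failure.  The name is used for the worker threads:
 * "mydrv/0", "mydrv/1", ... in the per-CPU case.
 */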

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        lock_cpu_hotplug();
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                spin_lock(&workqueue_lock);
                list_del(&wq->list);
                spin_unlock(&workqueue_lock);
        }
        unlock_cpu_hotplug();
        free_percpu(wq->cpu_wq);
        kfree(wq);
}

static struct workqueue_struct *keventd_wq;

int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}

int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, work, delay);
}
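
/*
 * Example (editor's sketch, not part of the original file; all names are
 * made up).  Most code does not create a private workqueue; it defers
 * work to keventd, e.g. to get out of interrupt context:
 *
 *      static void my_rx_work(void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              my_dev_process_rx(dev);
 *      }
 *
 *      schedule_work(&dev->rx_work);
 *
 * This is safe from hard-irq context: queue_work() only takes the per-CPU
 * queue lock with interrupts disabled.
 */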

int schedule_delayed_work_on(int cpu,
                        struct work_struct *work, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &work->timer;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
                /* This stores keventd_wq for the moment, for the timer_fn */
                work->wq_data = keventd_wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)work;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}

int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
        int cpu;
        struct work_struct *work;

        work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
        if (!work)
                return -ENOMEM;
        for_each_online_cpu(cpu) {
                INIT_WORK(work + cpu, func, info);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                work + cpu);
        }
        flush_workqueue(keventd_wq);
        kfree(work);
        return 0;
}
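
/*
 * Example (editor's sketch, not part of the original file): run a handler
 * once on every online CPU and wait for all of them to finish:
 *
 *      static atomic_t hits = ATOMIC_INIT(0);
 *
 *      static void bump_counter(void *info)
 *      {
 *              atomic_inc((atomic_t *)info);
 *      }
 *
 *      if (schedule_on_each_cpu(bump_counter, &hits))
 *              printk("out of memory\n");
 */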

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *                      work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct work_struct *work)
{
        while (!cancel_delayed_work(work))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
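
/*
 * Example (editor's sketch, not part of the original file; names are made
 * up).  This is the self-rearming pattern the function above exists to
 * stop; a bare cancel_delayed_work() can lose the race against a handler
 * that is already running and about to requeue itself:
 *
 *      static void my_poll(void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              my_check_device(dev);
 *              queue_delayed_work(my_wq, &dev->poll_work, HZ);
 *      }
 *
 * cancel_rearming_delayed_workqueue() loops cancel-then-flush until the
 * cancel wins that race.
 */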

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *                      work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
        cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        LIST_HEAD(list);
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_splice_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                break;
        }

        return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}

EXPORT_SYMBOL_GPL(__create_workqueue);
EXPORT_SYMBOL_GPL(queue_work);
EXPORT_SYMBOL_GPL(queue_delayed_work);
EXPORT_SYMBOL_GPL(flush_workqueue);
EXPORT_SYMBOL_GPL(destroy_workqueue);

EXPORT_SYMBOL(schedule_work);
EXPORT_SYMBOL(schedule_delayed_work);
EXPORT_SYMBOL(schedule_delayed_work_on);
EXPORT_SYMBOL(flush_scheduled_work);