/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
struct global_cwq;
struct cpu_workqueue_struct;

struct worker {
	struct work_struct	*current_work;	/* L: work being processed */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	struct ida		worker_ida;	/* L: for worker IDs */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	wait_queue_head_t	more_work;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static int singlethread_cpu __read_mostly;

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
					       struct workqueue_struct *wq)
{
	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
		cpu = singlethread_cpu;
	return get_cwq(cpu, wq);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
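
/*
 * Worked example of the color helpers above (an illustrative sketch,
 * not part of the original file): the current queue color is folded
 * into the WORK_STRUCT_* flag word when a work is inserted and read
 * back when it retires, so the flush machinery can tell which
 * "generation" a work belongs to.
 *
 *	unsigned int flags = work_color_to_flags(cwq->work_color);
 *	// insert_work(cwq, work, head, flags) stores it in work->data
 *	int color = get_work_color(work);   // same value as cwq->work_color
 *	int next  = work_next_color(color); // wraps modulo WORK_NR_COLORS
 */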
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
	struct global_cwq *gcwq = cwq->gcwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	spin_lock_irqsave(&gcwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
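
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically embeds a work_struct in its own state, initializes it with
 * INIT_WORK() and hands it to queue_work().  "my_dev", "my_wq" and
 * my_work_fn() are hypothetical names.
 *
 *	struct my_dev {
 *		struct work_struct	work;
 *		int			pending_events;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		dev->pending_events = 0;	// runs in process context
 *	}
 *
 *	// during setup:
 *	//	INIT_WORK(&dev->work, my_work_fn);
 *	// from an interrupt handler or elsewhere:
 *	//	queue_work(my_wq, &dev->work);
 */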
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
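
/*
 * Usage sketch (illustrative only, not part of this file): delayed work
 * is declared as a struct delayed_work and queued with a jiffies delay;
 * a common pattern is a polling handler that re-arms itself.  "my_wq",
 * "my_dwork" and my_poll_fn() are hypothetical names.
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_poll_fn);
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		// do the periodic processing, then re-arm
 *		queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(500));
 *	}
 *
 *	// kick off the first run:
 *	//	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(500));
 */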
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker)
		INIT_LIST_HEAD(&worker->scheduled);
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	if (bind)
		kthread_bind(worker->task, gcwq->cpu);

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Start @worker.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
	spin_unlock_irq(&gcwq->lock);
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	/* one down, submit a delayed one */
	if (!list_empty(&cwq->delayed_works) &&
	    cwq->nr_active < cwq->max_active)
		cwq_activate_first_delayed(cwq);

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	worker->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	worker->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (kthread_should_stop())
			break;

		if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu))))
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);

		while (!list_empty(&cwq->worklist)) {
			struct work_struct *work =
				list_first_entry(&cwq->worklist,
						 struct work_struct, entry);

			if (likely(!(*work_data_bits(work) &
				     WORK_STRUCT_LINKED))) {
				/* optimization path, not strictly necessary */
				process_one_work(worker, work);
				if (unlikely(!list_empty(&worker->scheduled)))
					process_scheduled_works(worker);
			} else {
				move_linked_works(work, &worker->scheduled,
						  NULL);
				process_scheduled_works(worker);
			}
		}

		spin_unlock_irq(&gcwq->lock);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
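
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * shutdown path makes sure that nothing it queued earlier is still
 * pending or running before tearing state down.  "my_wq" and
 * my_dev_shutdown() are hypothetical names.
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		// caller guarantees no new work is queued from here on
 *		flush_workqueue(my_wq);		// wait for already queued works
 *		destroy_workqueue(my_wq);	// flushes again, then frees
 *	}
 */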
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct cpu_workqueue_struct *cwq;
	struct global_cwq *gcwq;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;
	gcwq = cwq->gcwq;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
	} else {
		if (cwq->worker && cwq->worker->current_work == work)
			worker = cwq->worker;
		if (!worker)
			goto already_gone;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	cwq = get_wq_data(work);
	if (!cwq)
		return ret;
	gcwq = cwq->gcwq;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(cwq, get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	struct global_cwq *gcwq = cwq->gcwq;
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = NULL;
	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
		worker = cwq->worker;
		insert_wq_barrier(cwq, &barr, work, worker);
	}

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued.  If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself.  It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work().  See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
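
/*
 * Usage sketch (illustrative only, not part of this file): tearing down
 * a self-rearming delayed work.  Once the caller has arranged that the
 * handler will not re-queue itself, cancel_delayed_work_sync() both
 * kills the timer and waits for a running callback.  "my_dwork" and
 * the "shutting_down" flag are hypothetical names.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		dev->shutting_down = 1;			// handler stops re-arming
 *		cancel_delayed_work_sync(&my_dwork);	// timer gone, callback done
 *	}
 */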
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
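
/*
 * Usage sketch (illustrative only, not part of this file): callers that
 * may run in either process or interrupt context can use
 * execute_in_process_context() to run the routine directly when
 * possible and fall back to the global workqueue otherwise.
 * "my_release_ew" and my_release_fn() are hypothetical names.
 *
 *	static struct execute_work my_release_ew;
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		// free resources; always reached in process context
 *	}
 *
 *	// returns 0 if it ran now, 1 if it was deferred to keventd
 *	execute_in_process_context(my_release_fn, &my_release_ew);
 */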
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = get_cwq(cpu, keventd_wq);
	if (current == cwq->worker->task)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}

static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	if (cwqs)
		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						int max_active,
						struct lock_class_key *key,
						const char *lock_name)
{
	bool singlethread = flags & WQ_SINGLE_THREAD;
	struct workqueue_struct *wq;
	bool failed = false;
	unsigned int cpu;

	max_active = clamp_val(max_active, 1, INT_MAX);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally. Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->worklist);
		INIT_LIST_HEAD(&cwq->delayed_works);
		init_waitqueue_head(&cwq->more_work);

		if (failed)
			continue;
		cwq->worker = create_worker(cwq,
					    cpu_online(cpu) && !singlethread);
		if (cwq->worker)
			start_worker(cwq->worker);
		else
			failed = true;
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list. Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_possible_cpu(cpu)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (failed) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
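/*
 * Illustrative sketch, not part of the original file: a typical driver-side
 * use of the API exported above.  create_workqueue() reaches
 * __create_workqueue_key() through the wrapper macros in
 * include/linux/workqueue.h; my_wq, my_work and my_work_fn are hypothetical
 * names.
 */
static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context on one of my_wq's per-cpu workers */
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_module_init(void)
{
	my_wq = create_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;

	queue_work(my_wq, &my_work);
	return 0;
}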
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		if (cwq->worker) {
			destroy_worker(cwq->worker);
			cwq->worker = NULL;
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
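/*
 * Illustrative sketch, not part of the original file: the matching teardown
 * for the creation example above.  Pending work is cancelled first so nothing
 * requeues itself while destroy_workqueue() flushes and tears the queue down;
 * my_wq and my_work are the same hypothetical names as in the earlier sketch.
 */
static void __exit my_module_exit(void)
{
	cancel_work_sync(&my_work);
	destroy_workqueue(my_wq);
}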
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	list_for_each_entry(wq, &workqueues, list) {
		if (wq->flags & WQ_SINGLE_THREAD)
			continue;

		cwq = get_cwq(cpu, wq);

		switch (action) {
		case CPU_POST_DEAD:
			flush_workqueue(wq);
			break;
		}
	}

	return notifier_from_errno(0);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;

	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
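/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * of work_on_cpu().  The callback runs in a bound kthread on the requested
 * cpu; example_which_cpu() and example_run_on() are made-up names.
 */
static long example_which_cpu(void *arg)
{
	/* executes in process context on the cpu passed to work_on_cpu() */
	return raw_smp_processor_id();
}

static long example_run_on(unsigned int cpu)
{
	/* caller must keep @cpu online, e.g. under get_online_cpus() */
	return work_on_cpu(cpu, example_which_cpu, NULL);
}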
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their frozen_works
 * list instead of the cwq ones.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_possible_cpu(cpu) {
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective cwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);

			wake_up(&cwq->more_work);
		}

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
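/*
 * Illustrative sketch, not part of the original file: the order in which a
 * caller such as the system suspend freezer is expected to drive the three
 * functions above.  example_freeze_for_suspend() is a hypothetical name, and
 * msleep() would require linux/delay.h; the real callers live in the PM
 * freezer paths.
 */
static int example_freeze_for_suspend(void)
{
	freeze_workqueues_begin();

	/* poll until all freezeable cwqs have drained their in-flight work */
	while (freeze_workqueues_busy())
		msleep(10);

	/* ... system is now safe to snapshot/suspend ... */

	thaw_workqueues();
	return 0;
}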
#endif /* CONFIG_FREEZER */

void __init init_workqueues(void)
{
	unsigned int cpu;

	singlethread_cpu = cpumask_first(cpu_possible_mask);
	hotcpu_notifier(workqueue_cpu_callback, 0);

	/* initialize gcwqs */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;

		ida_init(&gcwq->worker_ida);
	}

	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}