/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected.  Access with cwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct cpu_workqueue_struct;

struct worker {
	struct work_struct	*current_work;	/* L: work being processed */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	int			id;		/* I: worker id */
};

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	spinlock_t		lock;
	struct list_head	worklist;
	wait_queue_head_t	more_work;
	unsigned int		cpu;
	struct worker		*worker;

	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* I: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
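
/*
 * Summary of how the color fields above are used (see flush_workqueue()
 * and cwq_dec_nr_in_flight() below): every queued work is stamped with
 * the cwq's current work_color and counted in nr_in_flight[color].
 * flush_workqueue() closes the current color, advances work_color and
 * waits until nr_in_flight[old color] drains to zero on all cwqs;
 * flush_color marks the color being flushed and stays -1 while no flush
 * is in progress.  nr_active/max_active throttle concurrently active
 * works; excess works sit on delayed_works until a slot frees up.
 */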

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static DEFINE_PER_CPU(struct ida, worker_ida);

static int worker_thread(void *__worker);

static int singlethread_cpu __read_mostly;

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
					       struct workqueue_struct *wq)
{
	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
		cpu = singlethread_cpu;
	return get_cwq(cpu, wq);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}
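
/*
 * Descriptive note: work->data does double duty.  Because cwqs are
 * aligned to 1 << WORK_STRUCT_FLAG_BITS, the low WORK_STRUCT_FLAG_BITS
 * of the pointer are always zero and are reused for the PENDING,
 * STATIC, LINKED and color bits, while the remaining bits hold the cwq
 * pointer itself.  get_wq_data() recovers the pointer by masking with
 * WORK_STRUCT_WQ_DATA_MASK; set_wq_data()/clear_wq_data() rewrite the
 * whole word atomically.
 */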

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	spin_lock_irqsave(&cwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
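
/*
 * Illustrative example only (not part of the original file; my_work_fn,
 * my_work and my_wq are made-up names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work_fn: running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 *
 * where my_wq was obtained from create_workqueue().
 */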

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
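
/*
 * Illustrative example only (my_dwork_fn, my_dwork and my_wq are
 * made-up names): run a handler roughly two seconds from now.
 *
 *	static void my_dwork_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, 2 * HZ);
 */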

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker)
		INIT_LIST_HEAD(&worker->scheduled);
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to bind the worker to cwq->cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker, %NULL on failure.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	int id = -1;
	struct worker *worker = NULL;

	spin_lock(&workqueue_lock);
	while (ida_get_new(&per_cpu(worker_ida, cwq->cpu), &id)) {
		spin_unlock(&workqueue_lock);
		if (!ida_pre_get(&per_cpu(worker_ida, cwq->cpu), GFP_KERNEL))
			goto fail;
		spin_lock(&workqueue_lock);
	}
	spin_unlock(&workqueue_lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      cwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	if (bind)
		kthread_bind(worker->task, cwq->cpu);

	return worker;
fail:
	if (id >= 0) {
		spin_lock(&workqueue_lock);
		ida_remove(&per_cpu(worker_ida, cwq->cpu), id);
		spin_unlock(&workqueue_lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Start @worker.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void start_worker(struct worker *worker)
{
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker.
 */
static void destroy_worker(struct worker *worker)
{
	int cpu = worker->cwq->cpu;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock(&workqueue_lock);
	ida_remove(&per_cpu(worker_ida, cpu), id);
	spin_unlock(&workqueue_lock);
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or has been removed from the pending queue;
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	/* one down, submit a delayed one */
	if (!list_empty(&cwq->delayed_works) &&
	    cwq->nr_active < cwq->max_active)
		cwq_activate_first_delayed(cwq);

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}

/**
 * process_one_work - process a single work item
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work item, including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work item.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	worker->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&cwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&cwq->lock);

	/* we're done with it, release */
	worker->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct cpu_workqueue_struct *cwq = worker->cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->flags & WQ_FREEZEABLE)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(cwq->cpu))))
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(cwq->cpu));

		spin_lock_irq(&cwq->lock);

		while (!list_empty(&cwq->worklist)) {
			struct work_struct *work =
				list_first_entry(&cwq->worklist,
						 struct work_struct, entry);

			if (likely(!(*work_data_bits(work) &
				     WORK_STRUCT_LINKED))) {
				/* optimization path, not strictly necessary */
				process_one_work(worker, work);
				if (unlikely(!list_empty(&worker->scheduled)))
					process_scheduled_works(worker);
			} else {
				move_linked_works(work, &worker->scheduled,
						  NULL);
				process_scheduled_works(worker);
			}
		}

		spin_unlock_irq(&cwq->lock);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}

/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight works at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in-flight works, its cwq->flush_color is set to @flush_color,
 * @wq->nr_cwqs_to_flush is updated accordingly, cwq wakeup logic is
 * armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

		spin_lock_irq(&cwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&cwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct cpu_workqueue_struct *cwq;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
	} else {
		if (cwq->worker && cwq->worker->current_work == work)
			worker = cwq->worker;
		if (!worker)
			goto already_gone;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&cwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&cwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
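/*
 * Descriptive note on the return value (derived from the code below):
 *   1  - @work was queued; it has been taken off the list and the
 *        caller now owns PENDING.
 *   0  - @work was idle; PENDING has been set and the caller owns it.
 *  -1  - @work is pending but could not be grabbed (e.g. queueing is
 *        in progress elsewhere); callers such as __cancel_work_timer()
 *        retry until this stops happening.
 */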
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(cwq, get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&cwq->lock);

	worker = NULL;
	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
		worker = cwq->worker;
		insert_wq_barrier(cwq, &barr, work, worker);
	}

	spin_unlock_irq(&cwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}

static int __cancel_work_timer(struct work_struct *work,
			       struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued.  If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself.  It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work().  See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
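
/*
 * Illustrative teardown ordering (example only; "priv" and its members
 * are made-up names).  destroy_workqueue() flushes remaining work
 * itself, so cancelling the self-rearming item first is enough:
 *
 *	cancel_delayed_work_sync(&priv->poll_dwork);
 *	destroy_workqueue(priv->wq);
 */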

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
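
/*
 * Illustrative example only (my_dev, free_ew and my_dev_free are
 * made-up names): freeing an object from a path that may run in
 * interrupt context.
 *
 *	struct my_dev {
 *		struct execute_work	free_ew;
 *	};
 *
 *	static void my_dev_free(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct my_dev, free_ew.work));
 *	}
 *
 *	execute_in_process_context(my_dev_free, &dev->free_ew);
 */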

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = get_cwq(cpu, keventd_wq);
	if (current == cwq->worker->task)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}

static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	if (cwqs)
		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						int max_active,
						struct lock_class_key *key,
						const char *lock_name)
{
	bool singlethread = flags & WQ_SINGLE_THREAD;
	struct workqueue_struct *wq;
	bool failed = false;
	unsigned int cpu;

	max_active = clamp_val(max_active, 1, INT_MAX);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally.  Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->cpu = cpu;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		spin_lock_init(&cwq->lock);
		INIT_LIST_HEAD(&cwq->worklist);
		INIT_LIST_HEAD(&cwq->delayed_works);
		init_waitqueue_head(&cwq->more_work);

		if (failed)
			continue;
		cwq->worker = create_worker(cwq,
					    cpu_online(cpu) && !singlethread);
		if (cwq->worker)
			start_worker(cwq->worker);
		else
			failed = true;
	}

	spin_lock(&workqueue_lock);
	list_add(&wq->list, &workqueues);
	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (failed) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
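
/*
 * Note: callers normally reach __create_workqueue_key() through the
 * create_workqueue()/create_singlethread_workqueue() wrappers declared
 * in <linux/workqueue.h>, which supply the flags and lockdep key.
 * Illustrative usage ("my_wq" is a made-up name):
 *
 *	struct workqueue_struct *wq = create_workqueue("my_wq");
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */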

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	flush_workqueue(wq);

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		if (cwq->worker) {
			destroy_worker(cwq->worker);
			cwq->worker = NULL;
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	list_for_each_entry(wq, &workqueues, list) {
		if (wq->flags & WQ_SINGLE_THREAD)
			continue;

		cwq = get_cwq(cpu, wq);

		switch (action) {
		case CPU_POST_DEAD:
			flush_workqueue(wq);
			break;
		}
	}

	return notifier_from_errno(0);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;

	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
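
/*
 * Illustrative use of work_on_cpu() (example only; read_cpu_id is a
 * made-up function):
 *
 *	static long read_cpu_id(void *unused)
 *	{
 *		return raw_smp_processor_id();
 *	}
 *
 *	long id = work_on_cpu(2, read_cpu_id, NULL);	(returns 2)
 */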

void __init init_workqueues(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		ida_init(&per_cpu(worker_ida, cpu));

	singlethread_cpu = cpumask_first(cpu_possible_mask);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}