/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"
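
/*
 * Illustrative sketch (not part of the original file): the basic
 * client-side pattern the header comment describes.  A user embeds a
 * work_struct, points it at a handler and queues it; the shared worker
 * pool runs the handler in process context.  The example_* names are
 * hypothetical.
 */
static void example_work_fn(struct work_struct *work)
{
	pr_info("running in process context on cpu %d\n",
		raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);

static void __maybe_unused example_submit(void)
{
	schedule_work(&example_work);	/* queue on system_wq */
}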
enum {
	/*
	 * global_cwq flags
	 *
	 * A bound gcwq is either associated or disassociated with its CPU.
	 * While associated (!DISASSOCIATED), all workers are bound to the
	 * CPU and none has %WORKER_UNBOUND set and concurrency management
	 * is in effect.
	 *
	 * While DISASSOCIATED, the cpu may be offline and all workers have
	 * %WORKER_UNBOUND set and concurrency management disabled, and may
	 * be executing on any CPU.  The gcwq behaves as an unbound one.
	 *
	 * Note that DISASSOCIATED can be flipped only while holding
	 * assoc_mutex of all pools on the gcwq to avoid changing binding
	 * state while create_worker() is in progress.
	 */
	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */

	/* pool flags */
	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_UNBOUND |
				  WORKER_CPU_INTENSIVE,

	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */

	/*
	 * Rescue workers are used only in emergencies and shared by
	 * all cpus.  Give them nice level -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
	HIGHPRI_NICE_LEVEL	= -20,
};
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
struct global_cwq;
struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/* for rebinding worker to CPU */
	struct work_struct	rebind_work;	/* L: for busy worker */
};

struct worker_pool {
	struct global_cwq	*gcwq;		/* I: the owning gcwq */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */

	/* nr_idle includes the ones off idle_list for rebinding */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct mutex		assoc_mutex;	/* protect GCWQ_DISASSOCIATED */
	struct ida		worker_ida;	/* L: for worker IDs */
};
/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	/* workers are chained either in busy_hash or pool idle_list */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct worker_pool	pools[NR_WORKER_POOLS];
						/* normal and highpri pools */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
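
/*
 * Illustrative sketch (not part of the original file): the intended
 * lifecycle of the mayday_mask_t abstraction above.  On SMP it is a
 * real cpumask; on UP it degenerates to a single bit, which is why the
 * comment above notes that UP masks are assumed always set.
 */
static void __maybe_unused example_mayday_usage(void)
{
	mayday_mask_t mask;
	unsigned int cpu;

	if (!alloc_mayday_mask(&mask, GFP_KERNEL))
		return;

	if (!mayday_test_and_set_cpu(0, mask))	/* first SOS from cpu 0 */
		pr_debug("cpu 0 now waiting for a rescuer\n");

	for_each_mayday_cpu(cpu, mask)
		mayday_clear_cpu(cpu, mask);	/* rescuer serviced it */

	free_mayday_mask(mask);
}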
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name */
};
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_worker_pool(pool, gcwq)				\
	for ((pool) = &(gcwq)->pools[0];				\
	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}
/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to
 * for_each_*_cpu() iterators but also considers the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
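
/*
 * Illustrative sketch (not part of the original file): walking every
 * gcwq cpu slot, including the unbound one, with the iterator defined
 * above.  This is the pattern functions such as is_chained_work() use
 * further down in this file.
 */
static void __maybe_unused example_walk_gcwq_cpus(void)
{
	unsigned int cpu;

	for_each_gcwq_cpu(cpu)
		pr_debug("would visit gcwq for cpu %u%s\n", cpu,
			 cpu == WORK_CPU_UNBOUND ? " (unbound)" : "");
}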
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
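
/*
 * Illustrative sketch (not part of the original file): the on-stack
 * work pattern that __init_work() and destroy_work_on_stack() above
 * exist to support.  With CONFIG_DEBUG_OBJECTS_WORK the debugobjects
 * hooks verify the object's lifetime; without it they compile away.
 * example_work_fn() is the hypothetical handler from the earlier sketch.
 */
static void __maybe_unused example_on_stack_work(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, example_work_fn);
	schedule_work(&work);
	flush_work(&work);	/* must finish before the stack frame dies */
	destroy_work_on_stack(&work);
}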
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
	[0 ... NR_WORKER_POOLS - 1]	= ATOMIC_INIT(0),	/* always 0 */
};

static int worker_thread(void *__worker);

static int worker_pool_pri(struct worker_pool *pool)
{
	return pool - pool->gcwq->pools;
}

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_pool_nr_running(struct worker_pool *pool)
{
	int cpu = pool->gcwq->cpu;
	int idx = worker_pool_pri(pool);

	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(pool_nr_running, cpu)[idx];
	else
		return &unbound_pool_nr_running[idx];
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids))
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
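
/*
 * Illustrative sketch (not part of the original file): the color
 * helpers above form a simple round-trip.  A color is shifted into a
 * work's flag bits when it is queued and extracted again when it
 * leaves the queue; work_next_color() cycles through WORK_NR_COLORS
 * for successive flush generations.
 */
static void __maybe_unused example_color_round_trip(int color)
{
	unsigned int flags = work_color_to_flags(color);

	/* the decode step in get_work_color() inverts the encode above */
	WARN_ON_ONCE(((flags >> WORK_STRUCT_COLOR_SHIFT) &
		      ((1 << WORK_STRUCT_COLOR_BITS) - 1)) != color);

	pr_debug("color %d, next flush color %d\n",
		 color, work_next_color(color));
}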
/*
 * While queued, %WORK_STRUCT_CWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued cwq.  Once execution starts, the
 * flag is cleared and the high bits contain the OFFQ flags and CPU number.
 *
 * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the cwq, cpu or clear
 * work->data.  These functions should only be called while the work is
 * owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
 * a work.  gcwq is available once the work has been queued anywhere after
 * initialization until it is sync canceled.  cwq is available only while
 * the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off the timer and worklist for arbitrarily long and nobody
 * should try to steal the PENDING bit.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu_and_clear_pending(struct work_struct *work,
					   unsigned int cpu)
{
	/*
	 * The following wmb is paired with the implied mb in
	 * test_and_set_bit(PENDING) and ensures all updates to @work made
	 * here are visible to and precede any updates by the next PENDING
	 * owner.
	 */
	smp_wmb();
	set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
}

static void clear_work_data(struct work_struct *work)
{
	smp_wmb();	/* see set_work_cpu_and_clear_pending() */
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;

	cpu = data >> WORK_OFFQ_CPU_SHIFT;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}

static void mark_work_canceling(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;

	set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
		      WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
}
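
/*
 * Illustrative schematic (not part of the original file): the two
 * encodings of work->data that the helpers above switch between.
 * Exact bit positions come from the WORK_STRUCT_* and WORK_OFFQ_*
 * definitions in linux/workqueue.h; this is only a sketch.
 *
 *   queued (WORK_STRUCT_CWQ set):
 *     [ cwq pointer, low flag bits masked off | CWQ=1 | PENDING=1 ]
 *
 *   off queue (WORK_STRUCT_CWQ clear):
 *     [ cpu << WORK_OFFQ_CPU_SHIFT | CANCELING? | CWQ=0 | PENDING? ]
 */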
/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(get_pool_nr_running(pool));
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound gcwq as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	atomic_t *nr_running = get_pool_nr_running(pool);

	return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct worker_pool *pool)
{
	return need_to_create_worker(pool) ||
		(pool->flags & POOL_MANAGE_WORKERS);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGING_WORKERS;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	/*
	 * nr_idle and idle_list may disagree if idle rebinding is in
	 * progress.  Never return %true if idle_list is empty.
	 */
	if (list_empty(&pool->idle_list))
		return false;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
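
/*
 * Worked example (not part of the original file): with
 * MAX_IDLE_WORKERS_RATIO == 4, the check above keeps at most two idle
 * workers plus one per four busy ones.  E.g. with nr_busy == 8, up to
 * three idlers are tolerated; at nr_idle == 4, (4 - 2) * 4 = 8 >= 8
 * and too_many_workers() returns %true, letting the idle timer reap
 * the surplus.
 */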
/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}
/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING)) {
		WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
		atomic_inc(get_pool_nr_running(worker->pool));
	}
}
/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool = worker->pool;
	atomic_t *nr_running = get_pool_nr_running(pool);

	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that we're bound to and
	 * running on the local cpu w/ rq lock held and preemption
	 * disabled, which in turn means that no one else can be
	 * manipulating idle_list, so dereferencing idle_list without gcwq
	 * lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
		to_wakeup = first_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_pool_nr_running(pool);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&pool->worklist))
				wake_up_worker(pool);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}
/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_pool_nr_running(pool));
}
/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}
static void cwq_activate_delayed_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);

	trace_workqueue_activate_work(work);
	move_linked_works(work, &cwq->pool->worklist, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	cwq_activate_delayed_work(work);
}
/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work has either completed or been removed from the pending queue;
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	cwq->nr_active--;
	if (!list_empty(&cwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (cwq->nr_active < cwq->max_active)
			cwq_activate_first_delayed(cwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.  Return values are
 *
 *  1		if @work was pending and we successfully stole PENDING
 *  0		if @work was idle and we claimed PENDING
 *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT	if someone else is canceling @work, this state may persist
 *		for arbitrarily long
 *
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN only for a short, finite
 * period of time.
 *
 * On a successful return (>= 0), irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
			       unsigned long *flags)
{
	struct global_cwq *gcwq;

	local_irq_save(*flags);

	/* try to steal the timer if it exists */
	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		/*
		 * dwork->timer is irqsafe.  If del_timer() fails, it's
		 * guaranteed that the timer is not queued anywhere and not
		 * running on the local CPU.
		 */
		if (likely(del_timer(&dwork->timer)))
			return 1;
	}

	/* try to claim PENDING the normal way */
	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		goto fail;

	spin_lock(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);

			/*
			 * A delayed work item cannot be grabbed directly
			 * because it might have linked NO_COLOR work items
			 * which, if left on the delayed_list, will confuse
			 * cwq->nr_active management later on and cause
			 * stall.  Make sure the work item is activated
			 * before grabbing.
			 */
			if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
				cwq_activate_delayed_work(work);

			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
					     get_work_color(work));

			spin_unlock(&gcwq->lock);
			return 1;
		}
	}
	spin_unlock(&gcwq->lock);
fail:
	local_irq_restore(*flags);
	if (work_is_canceling(work))
		return -ENOENT;
	cpu_relax();
	return -EAGAIN;
}
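
/*
 * Illustrative sketch (not part of the original file): the caller
 * pattern the comment above implies, modeled on cancel-style users --
 * busy-retry while -EAGAIN, back off entirely on -ENOENT, and release
 * the saved irq state once PENDING is owned.
 */
static int __maybe_unused example_claim_pending(struct work_struct *work)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, false, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (ret < 0)
		return ret;	/* -ENOENT: someone else is canceling */

	/* we own PENDING and irqs are off; a caller would act, then: */
	local_irq_restore(flags);
	return ret;
}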
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct worker_pool *pool = cwq->pool;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(pool))
		wake_up_worker(pool);
}
/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.  This is rather expensive and should only be used from
 * cold paths.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	unsigned long flags;
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);
		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task != current)
				continue;
			spin_unlock_irqrestore(&gcwq->lock, flags);
			/*
			 * I'm @worker, no locking necessary.  See if @work
			 * is headed to the same workqueue.
			 */
			return worker->current_cwq->wq == wq;
		}
		spin_unlock_irqrestore(&gcwq->lock, flags);
	}
	return false;
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned int req_cpu = cpu;

	/*
	 * While a work item is PENDING && off queue, a task trying to
	 * steal the PENDING will busy-loop waiting for it to either get
	 * queued or lose PENDING.  Grabbing PENDING and queueing should
	 * happen with IRQ disabled.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (cpu == WORK_CPU_UNBOUND)
			cpu = raw_smp_processor_id();

		/*
		 * It's a multi-cpu workqueue.  If @work was previously on
		 * a different cpu, it might still be running there, in
		 * which case the work needs to be queued on that cpu to
		 * guarantee non-reentrancy.
		 */
		gcwq = get_gcwq(cpu);
		last_gcwq = get_work_gcwq(work);

		if (last_gcwq && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock(&last_gcwq->lock);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock(&last_gcwq->lock);
				spin_lock(&gcwq->lock);
			}
		} else {
			spin_lock(&gcwq->lock);
		}
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock(&gcwq->lock);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(req_cpu, cwq, work);

	if (WARN_ON(!list_empty(&work->entry))) {
		spin_unlock(&gcwq->lock);
		return;
	}

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		trace_workqueue_activate_work(work);
		cwq->nr_active++;
		worklist = &cwq->pool->worklist;
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock(&gcwq->lock);
}
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to a specific CPU; the caller must ensure it
 * can't go away.
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
		   struct work_struct *work)
{
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
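/*
 * Usage sketch (editor's illustration, not part of the original file):
 * queue a statically initialized work item on CPU 1 of the system
 * workqueue. "my_work_fn" and "my_work" are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("executing on CPU %d\n", raw_smp_processor_id());
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	...
 *	if (!queue_work_on(1, system_wq, &my_work))
 *		pr_debug("my_work was already pending\n");
 */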
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
}
EXPORT_SYMBOL_GPL(queue_work);
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	/* should have been called from irqsafe timer with irq already off */
	__queue_work(dwork->cpu, cwq->wq, &dwork->work);
}
EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
				 struct delayed_work *dwork, unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;
	unsigned int lcpu;

	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
		     timer->data != (unsigned long)dwork);
	WARN_ON_ONCE(timer_pending(timer));
	WARN_ON_ONCE(!list_empty(&work->entry));

	/*
	 * If @delay is 0, queue @dwork->work immediately. This is for
	 * both optimization and correctness. The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		__queue_work(cpu, wq, &dwork->work);
		return;
	}

	timer_stats_timer_set_start_info(&dwork->timer);

	/*
	 * This stores cwq for the moment, for the timer_fn. Note that the
	 * work's gcwq is preserved to allow reentrance detection for
	 * delayed works.
	 */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *gcwq = get_work_gcwq(work);

		/*
		 * If we cannot get the last gcwq from @work directly,
		 * select the last CPU such that it avoids unnecessarily
		 * triggering non-reentrancy check in __queue_work().
		 */
		lcpu = cpu;
		if (gcwq)
			lcpu = gcwq->cpu;
		if (lcpu == WORK_CPU_UNBOUND)
			lcpu = raw_smp_processor_id();
	} else {
		lcpu = WORK_CPU_UNBOUND;
	}

	set_work_cwq(work, get_cwq(lcpu, wq), 0);

	dwork->cpu = cpu;
	timer->expires = jiffies + delay;

	if (unlikely(cpu != WORK_CPU_UNBOUND))
		add_timer_on(timer, cpu);
	else
		add_timer(timer);
}
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns %false if @dwork was already on a queue, %true otherwise. If
 * @delay is zero and @dwork is idle, it will be scheduled for immediate
 * execution.
 */
bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			   struct delayed_work *dwork, unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret = false;
	unsigned long flags;

	/* read the comment in __queue_work() */
	local_irq_save(flags);

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 */
bool queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
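/*
 * Usage sketch (editor's illustration, not part of the original file):
 * run a hypothetical "poll_fn" 100ms from now on the system workqueue.
 *
 *	static void poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(poll_work, poll_fn);
 *
 *	...
 *	queue_delayed_work(system_wq, &poll_work, msecs_to_jiffies(100));
 */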
/**
 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is
 * zero, @dwork is guaranteed to be scheduled immediately regardless of its
 * current state.
 *
 * Returns %false if @dwork was idle and queued, %true if @dwork was
 * pending and its timer was modified.
 *
 * This function is safe to call from any context including IRQ handler.
 * See try_to_grab_pending() for details.
 */
bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			 struct delayed_work *dwork, unsigned long delay)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);
	}

	/* -ENOENT from try_to_grab_pending() becomes %true */
	return ret;
}
EXPORT_SYMBOL_GPL(mod_delayed_work_on);
/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * mod_delayed_work_on() on local CPU.
 */
bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
		      unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(mod_delayed_work);
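/*
 * Usage sketch (editor's illustration, not part of the original file):
 * mod_delayed_work() makes a simple debounce pattern possible. Each call
 * pushes the expiry out again, so the hypothetical "flush_fn" below runs
 * only once events stop arriving for 50ms.
 *
 *	static void flush_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(flush_dwork, flush_fn);
 *
 *	void on_event(void)
 *	{
 *		mod_delayed_work(system_wq, &flush_dwork,
 *				 msecs_to_jiffies(50));
 *	}
 */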
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state. Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/*
	 * Sanity check nr_running. Because gcwq_unbind_fn() releases
	 * gcwq->lock between setting %WORKER_UNBOUND and zapping
	 * nr_running, the warning may trigger spuriously. Check iff
	 * unbind is not in progress.
	 */
	WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(get_pool_nr_running(pool)));
}
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state. Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}
/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online. kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies the
 * binding against %GCWQ_DISASSOCIATED which is set during
 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
 * enters idle state or fetches works without dropping lock, it can
 * guarantee the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep. Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->pool->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation. Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/*
		 * We've raced with CPU hot[un]plug. Give it a breather
		 * and retry migration. cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}
/*
 * Rebind an idle @worker to its CPU. worker_thread() will test
 * list_empty(@worker->entry) before leaving idle and call this function.
 */
static void idle_worker_rebind(struct worker *worker)
{
	struct global_cwq *gcwq = worker->pool->gcwq;

	/* CPU may go down again in between, clear UNBOUND only on success */
	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_UNBOUND);

	/* rebind complete, become available again */
	list_add(&worker->entry, &worker->pool->idle_list);
	spin_unlock_irq(&gcwq->lock);
}
/*
 * Function for @worker->rebind.work used to rebind unbound busy workers to
 * the associated cpu which is coming back online. This is scheduled by
 * cpu up but can race with other cpu hotplug operations and may be
 * executed twice without intervening cpu down.
 */
static void busy_worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->pool->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_UNBOUND);

	spin_unlock_irq(&gcwq->lock);
}
/**
 * rebind_workers - rebind all workers of a gcwq to the associated CPU
 * @gcwq: gcwq of interest
 *
 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
 * is different for idle and busy ones.
 *
 * Idle ones will be removed from the idle_list and woken up. They will
 * add themselves back after completing rebind. This ensures that the
 * idle_list doesn't contain any unbound workers when re-bound busy workers
 * try to perform local wake-ups for concurrency management.
 *
 * Busy workers can rebind after they finish their current work items.
 * Queueing the rebind work item at the head of the scheduled list is
 * enough. Note that nr_running will be properly bumped as busy workers
 * rebind.
 *
 * On return, all non-manager workers are scheduled for rebind - see
 * manage_workers() for the manager special case. Any idle worker
 * including the manager will not appear on @idle_list until rebind is
 * complete, making local wake-ups safe.
 */
static void rebind_workers(struct global_cwq *gcwq)
{
	struct worker_pool *pool;
	struct worker *worker, *n;
	struct hlist_node *pos;
	int i;

	lockdep_assert_held(&gcwq->lock);

	for_each_worker_pool(pool, gcwq)
		lockdep_assert_held(&pool->assoc_mutex);

	/* dequeue and kick idle ones */
	for_each_worker_pool(pool, gcwq) {
		list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
			/*
			 * idle workers should be off @pool->idle_list
			 * until rebind is complete to avoid receiving
			 * premature local wake-ups.
			 */
			list_del_init(&worker->entry);

			/*
			 * worker_thread() will see the above dequeuing
			 * and call idle_worker_rebind().
			 */
			wake_up_process(worker->task);
		}
	}

	/* rebind busy workers */
	for_each_busy_worker(worker, i, pos, gcwq) {
		struct work_struct *rebind_work = &worker->rebind_work;
		struct workqueue_struct *wq;

		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				     work_data_bits(rebind_work)))
			continue;

		debug_work_activate(rebind_work);

		/*
		 * wq doesn't really matter but let's keep @worker->pool
		 * and @cwq->pool consistent for sanity.
		 */
		if (worker_pool_pri(worker->pool))
			wq = system_highpri_wq;
		else
			wq = system_wq;

		insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
			    worker->scheduled.next,
			    work_color_to_flags(WORK_NO_COLOR));
	}
}
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}
/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 *
 * Create a new worker which is bound to @pool. The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep. Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool)
{
	struct global_cwq *gcwq = pool->gcwq;
	const char *pri = worker_pool_pri(pool) ? "H" : "";
	struct worker *worker = NULL;
	int id = -1;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&pool->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->pool = pool;
	worker->id = id;

	if (gcwq->cpu != WORK_CPU_UNBOUND)
		worker->task = kthread_create_on_node(worker_thread,
					worker, cpu_to_node(gcwq->cpu),
					"kworker/%u:%d%s", gcwq->cpu, id, pri);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d%s", id, pri);
	if (IS_ERR(worker->task))
		goto fail;

	if (worker_pool_pri(pool))
		set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);

	/*
	 * Determine CPU binding of the new worker depending on
	 * %GCWQ_DISASSOCIATED. The caller is responsible for ensuring the
	 * flag remains stable across this function. See the comments
	 * above the flag definition for details.
	 *
	 * As an unbound worker may later become a regular one if CPU comes
	 * online, make sure every worker has %PF_THREAD_BOUND set.
	 */
	if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
		kthread_bind(worker->task, gcwq->cpu);
	} else {
		worker->task->flags |= PF_THREAD_BOUND;
		worker->flags |= WORKER_UNBOUND;
	}

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&pool->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}
/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		pool->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		pool->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&pool->worker_ida, id);
}
static void idle_worker_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;
	struct global_cwq *gcwq = pool->gcwq;

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			mod_timer(&pool->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			pool->flags |= POOL_MANAGE_WORKERS;
			wake_up_worker(pool);
		}
	}

	spin_unlock_irq(&gcwq->lock);
}
static bool send_mayday(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct workqueue_struct *wq = cwq->wq;
	unsigned int cpu;

	if (!(wq->flags & WQ_RESCUER))
		return false;

	/* mayday mayday mayday */
	cpu = cwq->pool->gcwq->cpu;
	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = 0;
	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
		wake_up_process(wq->rescuer->task);
	return true;
}
static void gcwq_mayday_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;
	struct global_cwq *gcwq = pool->gcwq;
	struct work_struct *work;

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(pool)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful. We might be hitting an
		 * allocation deadlock. Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &pool->worklist, entry)
			send_mayday(work);
	}

	spin_unlock_irq(&gcwq->lock);

	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
 * maybe_create_worker - create a new worker if necessary
 * @pool: pool to create a new worker for
 *
 * Create a new worker for @pool if necessary. @pool is guaranteed to
 * have at least one idle worker on return from this function. If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @pool to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be false and
 * may_start_working() true.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations. Called only from
 * manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_create_worker(struct worker_pool *pool)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = pool->gcwq;

	if (!need_to_create_worker(pool))
		return false;
restart:
	spin_unlock_irq(&gcwq->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(pool);
		if (worker) {
			del_timer_sync(&pool->mayday_timer);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(pool));
			return true;
		}

		if (!need_to_create_worker(pool))
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(pool))
			break;
	}

	del_timer_sync(&pool->mayday_timer);
	spin_lock_irq(&gcwq->lock);
	if (need_to_create_worker(pool))
		goto restart;
	return true;
}
/**
 * maybe_destroy_workers - destroy workers which have been idle for a while
 * @pool: pool to destroy workers for
 *
 * Destroy @pool workers which have been idle for longer than
 * IDLE_WORKER_TIMEOUT.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times. Called only from manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_destroy_workers(struct worker_pool *pool)
{
	bool ret = false;

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
		ret = true;
	}

	return ret;
}
/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage gcwq worker pool @worker belongs
 * to. At any given time, there can be only zero or one manager per
 * gcwq. The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return. On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times. Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true if
 * some action was taken.
 */
static bool manage_workers(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	bool ret = false;

	if (pool->flags & POOL_MANAGING_WORKERS)
		return ret;

	pool->flags |= POOL_MANAGING_WORKERS;

	/*
	 * To simplify both worker management and CPU hotplug, hold off
	 * management while hotplug is in progress. CPU hotplug path can't
	 * grab %POOL_MANAGING_WORKERS to achieve this because that can
	 * lead to idle worker depletion (all become busy thinking someone
	 * else is managing) which in turn can result in deadlock under
	 * extreme circumstances. Use @pool->assoc_mutex to synchronize
	 * manager against CPU hotplug.
	 *
	 * assoc_mutex would always be free unless CPU hotplug is in
	 * progress. trylock first without dropping @gcwq->lock.
	 */
	if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
		spin_unlock_irq(&pool->gcwq->lock);
		mutex_lock(&pool->assoc_mutex);
		/*
		 * CPU hotplug could have happened while we were waiting
		 * for assoc_mutex. Hotplug itself can't handle us
		 * because manager isn't either on idle or busy list, and
		 * @gcwq's state and ours could have deviated.
		 *
		 * As hotplug is now excluded via assoc_mutex, we can
		 * simply try to bind. It will succeed or fail depending
		 * on @gcwq's current state. Try it and adjust
		 * %WORKER_UNBOUND accordingly.
		 */
		if (worker_maybe_bind_and_lock(worker))
			worker->flags &= ~WORKER_UNBOUND;
		else
			worker->flags |= WORKER_UNBOUND;

		ret = true;
	}

	pool->flags &= ~POOL_MANAGE_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(pool);
	ret |= maybe_create_worker(pool);

	pool->flags &= ~POOL_MANAGING_WORKERS;
	mutex_unlock(&pool->assoc_mutex);
	return ret;
}
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work. This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing. As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too. To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
	/*
	 * Ensure we're on the correct CPU. DISASSOCIATED test is
	 * necessary to avoid spurious warnings from rescuers servicing the
	 * unbound or a disassociated gcwq.
	 */
	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
		     !(gcwq->flags & GCWQ_DISASSOCIATED) &&
		     raw_smp_processor_id() != gcwq->cpu);

	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu. Check whether anyone is
	 * already processing the work. If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and dequeue */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	list_del_init(&work->entry);

	/*
	 * CPU intensive works don't participate in concurrency
	 * management. They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	/*
	 * Unbound gcwq isn't concurrency managed and work items should be
	 * executed ASAP. Wake up another worker if necessary.
	 */
	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
		wake_up_worker(pool);

	/*
	 * Record the last CPU and clear PENDING which should be the last
	 * update to @work. Also, do this inside @gcwq->lock so that
	 * PENDING and queued state changes happen together while IRQ is
	 * disabled.
	 */
	set_work_cpu_and_clear_pending(work, gcwq->cpu);

	spin_unlock_irq(&gcwq->lock);

	lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);
	f(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
		       "     last function: %pf\n",
		       current->comm, preempt_count(), task_pid_nr(current), f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* clear cpu intensive status */
	if (unlikely(cpu_intensive))
		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}
/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works. Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The gcwq worker thread function. There's a single dynamic pool of
 * these per cpu. These workers process all works regardless of
 * their specific target workqueue. The only exception is works which
 * belong to workqueues with a rescuer which will be explained in
 * rescuer_thread().
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;

	/* tell the scheduler that this is a workqueue worker */
	worker->task->flags |= PF_WQ_WORKER;
woke_up:
	spin_lock_irq(&gcwq->lock);

	/* we are off idle list if destruction or rebind is requested */
	if (unlikely(list_empty(&worker->entry))) {
		spin_unlock_irq(&gcwq->lock);

		/* if DIE is set, destruction is requested */
		if (worker->flags & WORKER_DIE) {
			worker->task->flags &= ~PF_WQ_WORKER;
			return 0;
		}

		/* otherwise, rebind */
		idle_worker_rebind(worker);
		goto woke_up;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	/*
	 * When control reaches this point, we're guaranteed to have
	 * at least one idle worker or that someone else has already
	 * assumed the manager role.
	 */
	worker_clr_flags(worker, WORKER_PREP);

	do {
		struct work_struct *work =
			list_first_entry(&pool->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(pool));

	worker_set_flags(worker, WORKER_PREP, false);
sleep:
	if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * gcwq->lock is held and there's no work to process and no
	 * need to manage, sleep. Workers are woken up only while
	 * holding gcwq->lock or from local cpu, so setting the
	 * current state before releasing gcwq->lock is enough to
	 * prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
/**
 * rescuer_thread - the rescuer thread function
 * @__wq: the associated workqueue
 *
 * Workqueue rescuer thread function. There's one rescuer for each
 * workqueue which has WQ_RESCUER set.
 *
 * Regular work processing on a gcwq may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation. This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the gcwq summons rescuers of all
 * workqueues which have works queued on the gcwq and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 */
static int rescuer_thread(void *__wq)
{
	struct workqueue_struct *wq = __wq;
	struct worker *rescuer = wq->rescuer;
	struct list_head *scheduled = &rescuer->scheduled;
	bool is_unbound = wq->flags & WQ_UNBOUND;
	unsigned int cpu;

	set_user_nice(current, RESCUER_NICE_LEVEL);
repeat:
	set_current_state(TASK_INTERRUPTIBLE);

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * See whether any cpu is asking for help. Unbounded
	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
	 */
	for_each_mayday_cpu(cpu, wq->mayday_mask) {
		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
		struct worker_pool *pool = cwq->pool;
		struct global_cwq *gcwq = pool->gcwq;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		mayday_clear_cpu(cpu, wq->mayday_mask);

		/* migrate to the target cpu if possible */
		rescuer->pool = pool;
		worker_maybe_bind_and_lock(rescuer);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		BUG_ON(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry)
			if (get_work_cwq(work) == cwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);

		/*
		 * Leave this gcwq. If keep_working() is %true, notify a
		 * regular worker; otherwise, we end up with 0 concurrency
		 * and stalling the execution.
		 */
		if (keep_working(pool))
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	schedule();
	goto repeat;
}
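/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a workqueue that may run work items during memory reclaim is allocated
 * with WQ_MEM_RECLAIM so that it gets a rescuer and thus a forward
 * progress guarantee. "my_reclaim_wq" is a hypothetical name.
 *
 *	struct workqueue_struct *my_reclaim_wq;
 *
 *	my_reclaim_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
 *	if (!my_reclaim_wq)
 *		return -ENOMEM;
 */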
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution. Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled. This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
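/*
 * Illustration (editor's addition, not part of the original file): for a
 * @target still sitting on the worklist, the insertion above produces
 *
 *	... -> target (LINKED) -> barr->work -> former successor -> ...
 *
 * with barr->work inheriting whatever LINKED value @target previously
 * carried, so an existing linked chain stays intact and the barrier
 * completes only once @target has been processed.
 */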
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1. If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color. If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush. %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->pool->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
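/*
 * Worked example (editor's addition, not part of the original file):
 * suppose wq->work_color is 2 when a flush starts. The flusher claims
 * flush color 2 and work_color advances, so new work items are tagged
 * with the next color while the flush waits only on color-2 items.
 * Every cwq that still has nr_in_flight[2] != 0 sets cwq->flush_color = 2
 * and bumps nr_cwqs_to_flush. The count starts at 1 and is decremented
 * once at the end of flush_workqueue_prep_cwqs() above, so the completion
 * fires immediately when nothing was in flight and otherwise exactly once
 * after the last color-2 cwq finishes.
 */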
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full. The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow. Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue. This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors. Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
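/*
 * Usage sketch (editor's illustration, not part of the original file):
 * typical driver teardown waits for all queued work before freeing
 * state. "my_wq" and "my_driver_remove" are hypothetical names, and
 * stopping new submissions beforehand is the caller's responsibility.
 *
 *	static void my_driver_remove(void)
 *	{
 *		flush_workqueue(my_wq);
 *		destroy_workqueue(my_wq);
 *	}
 */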
/**
 * drain_workqueue - drain a workqueue
 * @wq: workqueue to drain
 *
 * Wait until the workqueue becomes empty. While draining is in progress,
 * only chain queueing is allowed. IOW, only currently pending or running
 * work items on @wq can queue further work items on it. @wq is flushed
 * repeatedly until it becomes empty. The number of flushes is determined
 * by the depth of chaining and should be relatively short. Whine if it
 * takes too long.
 */
void drain_workqueue(struct workqueue_struct *wq)
{
	unsigned int flush_cnt = 0;
	unsigned int cpu;

	/*
	 * __queue_work() needs to test whether there are drainers, is much
	 * hotter than drain_workqueue() and already looks at @wq->flags.
	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
	 */
	spin_lock(&workqueue_lock);
	if (!wq->nr_drainers++)
		wq->flags |= WQ_DRAINING;
	spin_unlock(&workqueue_lock);
reflush:
	flush_workqueue(wq);

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		bool drained;

		spin_lock_irq(&cwq->pool->gcwq->lock);
		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
		spin_unlock_irq(&cwq->pool->gcwq->lock);

		if (drained)
			continue;

		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
				wq->name, flush_cnt);
		goto reflush;
	}

	spin_lock(&workqueue_lock);
	if (!--wq->nr_drainers)
		wq->flags &= ~WQ_DRAINING;
	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
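/*
 * Usage sketch (editor's illustration, not part of the original file):
 * drain_workqueue() lets a self-requeueing work item wind down, since
 * chain queueing stays allowed while WQ_DRAINING is set. All names
 * below are hypothetical.
 *
 *	static void step_fn(struct work_struct *work)
 *	{
 *		if (more_steps_left())
 *			queue_work(my_wq, work);	(chained requeue)
 *	}
 *	...
 *	drain_workqueue(my_wq);	(returns once step_fn stops requeueing)
 */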
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;

	might_sleep();

	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return false;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
			goto already_gone;
	} else {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	}

	insert_wq_barrier(cwq, barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	/*
	 * If @max_active is 1 or rescuer is in use, flushing another work
	 * item on the same workqueue may lead to deadlock. Make sure the
	 * flusher is not running on the same workqueue by verifying write
	 * access.
	 */
	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
		lock_map_acquire(&cwq->wq->lockdep_map);
	else
		lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	return true;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return false;
}
/**
 * flush_work - wait for a work to finish executing the last queueing instance
 * @work: the work to flush
 *
 * Wait until @work has finished execution. @work is guaranteed to be idle
 * on return if it hasn't been requeued since flush started.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	struct wq_barrier barr;

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (start_flush_work(work, &barr)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
		return true;
	} else {
		return false;
	}
}
EXPORT_SYMBOL_GPL(flush_work);
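/*
 * Usage sketch (editor's illustration, not part of the original file):
 * wait for one specific work item rather than flushing the whole
 * workqueue. "my_work" is a hypothetical work_struct.
 *
 *	if (flush_work(&my_work))
 *		pr_debug("my_work ran to completion\n");
 *	else
 *		pr_debug("my_work was already idle\n");
 */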
static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/*
		 * If someone else is canceling, wait for the same event it
		 * would be waiting for before retrying.
		 */
		if (unlikely(ret == -ENOENT))
			flush_work(work);
	} while (unlikely(ret < 0));

	/* tell other tasks trying to grab @work to back off */
	mark_work_canceling(work);
	local_irq_restore(flags);

	flush_work(work);
	clear_work_data(work);
	return ret;
}
  2539. /**
  2540. * cancel_work_sync - cancel a work and wait for it to finish
  2541. * @work: the work to cancel
  2542. *
  2543. * Cancel @work and wait for its execution to finish. This function
  2544. * can be used even if the work re-queues itself or migrates to
  2545. * another workqueue. On return from this function, @work is
  2546. * guaranteed to be not pending or executing on any CPU.
  2547. *
  2548. * cancel_work_sync(&delayed_work->work) must not be used for
  2549. * delayed_work's. Use cancel_delayed_work_sync() instead.
  2550. *
  2551. * The caller must ensure that the workqueue on which @work was last
  2552. * queued can't be destroyed before this function returns.
  2553. *
  2554. * RETURNS:
  2555. * %true if @work was pending, %false otherwise.
  2556. */
  2557. bool cancel_work_sync(struct work_struct *work)
  2558. {
  2559. return __cancel_work_timer(work, false);
  2560. }
  2561. EXPORT_SYMBOL_GPL(cancel_work_sync);
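
/*
 * Illustrative sketch (hypothetical code): unlike flush_work(), which
 * only waits, cancel_work_sync() also removes a pending instance, so a
 * device-removal path can rely on it even if the handler re-queues
 * itself.  my_dev is the same assumed structure as above.
 *
 *	static void my_dev_remove(struct my_dev *dev)
 *	{
 *		cancel_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 */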

/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	local_irq_disable();
	if (del_timer_sync(&dwork->timer))
		__queue_work(dwork->cpu,
			     get_work_cwq(&dwork->work)->wq, &dwork->work);
	local_irq_enable();
	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
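
/*
 * Illustrative sketch (hypothetical code): flush_delayed_work() gives
 * "run the deferred update now" semantics, e.g. completing a writeback
 * scheduled for later before suspend; writeback_dwork is an assumed
 * delayed_work member of my_dev.
 *
 *	static int my_dev_suspend(struct my_dev *dev)
 *	{
 *		flush_delayed_work(&dev->writeback_dwork);
 *		return 0;
 *	}
 */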

/**
 * cancel_delayed_work - cancel a delayed work
 * @dwork: delayed_work to cancel
 *
 * Kill off a pending delayed_work.  Returns %true if @dwork was pending
 * and canceled; %false if it wasn't pending.  Note that the work callback
 * function may still be running on return, unless it returns %true and the
 * work doesn't re-arm itself.  Explicitly flush or use
 * cancel_delayed_work_sync() to wait on it.
 *
 * This function is safe to call from any context including IRQ handler.
 */
bool cancel_delayed_work(struct delayed_work *dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (unlikely(ret < 0))
		return false;

	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);
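
/*
 * Illustrative sketch (hypothetical code): cancel_delayed_work() is IRQ
 * safe but does not wait, which suits an interrupt handler pushing back
 * a pending timeout before re-arming it.  Where only the expiry needs
 * changing, mod_delayed_work() combines the two steps.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		cancel_delayed_work(&dev->timeout_dwork);
 *		schedule_delayed_work(&dev->timeout_dwork, HZ);
 *		return IRQ_HANDLED;
 *	}
 */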

/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work to cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * RETURNS:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, true);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
bool schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns %false if @work was already on the kernel-global workqueue and
 * %true otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
EXPORT_SYMBOL(schedule_work);
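
/*
 * Illustrative sketch (hypothetical code): the canonical schedule_work()
 * use is deferring non-atomic processing out of an interrupt handler
 * onto system_wq; my_dev and its work item are assumptions.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		dev->stamp = jiffies;
 *		schedule_work(&dev->work);
 *		return IRQ_HANDLED;
 *	}
 */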

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
			      unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
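
/*
 * Illustrative sketch (hypothetical code): a self-rearming poll loop is
 * the classic schedule_delayed_work() pattern; INIT_DELAYED_WORK() is
 * assumed to have been done at setup and my_sample() is a stand-in.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  poll_dwork.work);
 *
 *		my_sample(dev);
 *		schedule_delayed_work(&dev->poll_dwork, HZ / 10);
 *	}
 */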

/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
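
/*
 * Illustrative sketch (hypothetical code): schedule_on_each_cpu() fits
 * rare global operations such as draining per-cpu caches.  The work
 * function runs on each CPU in turn; my_pcpu_cache is an assumed
 * DEFINE_PER_CPU variable and my_drain() an assumed helper.
 *
 *	static void my_drain_fn(struct work_struct *unused)
 *	{
 *		my_drain(this_cpu_ptr(&my_pcpu_cache));
 *	}
 *
 *	static int my_drain_all(void)
 *	{
 *		return schedule_on_each_cpu(my_drain_fn);
 *	}
 */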

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
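
/*
 * Illustrative sketch (hypothetical code): a release path that may run
 * in either process or interrupt context can hand the final free to
 * execute_in_process_context(), provided the execute_work storage lives
 * until the handler runs.  struct my_obj below is an assumption.
 *
 *	static void my_obj_free_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(work, struct my_obj, ew.work);
 *
 *		kfree(obj);
 *	}
 *
 *	static void my_obj_last_put(struct my_obj *obj)
 *	{
 *		execute_in_process_context(my_obj_free_fn, &obj->ew);
 *	}
 */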

int keventd_up(void)
{
	return system_wq != NULL;
}

static int alloc_cwqs(struct workqueue_struct *wq)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));

	if (!(wq->flags & WQ_UNBOUND))
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
	else {
		void *ptr;

		/*
		 * Allocate enough room to align cwq and put an extra
		 * pointer at the end pointing back to the originally
		 * allocated pointer which will be used for free.
		 */
		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		if (ptr) {
			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
			*(void **)(wq->cpu_wq.single + 1) = ptr;
		}
	}

	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
	return wq->cpu_wq.v ? 0 : -ENOMEM;
}

static void free_cwqs(struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND))
		free_percpu(wq->cpu_wq.pcpu);
	else if (wq->cpu_wq.single) {
		/* the pointer to free is stored right after the cwq */
		kfree(*(void **)(wq->cpu_wq.single + 1));
	}
}

static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)
{
	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;

	if (max_active < 1 || max_active > lim)
		pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
			max_active, name, 1, lim);

	return clamp_val(max_active, 1, lim);
}

struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
					       unsigned int flags,
					       int max_active,
					       struct lock_class_key *key,
					       const char *lock_name, ...)
{
	va_list args, args1;
	struct workqueue_struct *wq;
	unsigned int cpu;
	size_t namelen;

	/* determine namelen, allocate wq and format name */
	va_start(args, lock_name);
	va_copy(args1, args);
	namelen = vsnprintf(NULL, 0, fmt, args) + 1;

	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
	if (!wq)
		goto err;

	vsnprintf(wq->name, namelen, fmt, args1);
	va_end(args);
	va_end(args1);

	/*
	 * Workqueues which may be used during memory reclaim should
	 * have a rescuer to guarantee forward progress.
	 */
	if (flags & WQ_MEM_RECLAIM)
		flags |= WQ_RESCUER;

	max_active = max_active ?: WQ_DFL_ACTIVE;
	max_active = wq_clamp_max_active(max_active, flags, wq->name);

	/* init wq */
	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);

	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (alloc_cwqs(wq) < 0)
		goto err;

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);
		int pool_idx = (bool)(flags & WQ_HIGHPRI);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->pool = &gcwq->pools[pool_idx];
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);
	}

	if (flags & WQ_RESCUER) {
		struct worker *rescuer;

		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
			goto err;

		wq->rescuer = rescuer = alloc_worker();
		if (!rescuer)
			goto err;

		rescuer->task = kthread_create(rescuer_thread, wq, "%s",
					       wq->name);
		if (IS_ERR(rescuer->task))
			goto err;

		rescuer->task->flags |= PF_THREAD_BOUND;
		wake_up_process(rescuer->task);
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
		for_each_cwq_cpu(cpu, wq)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	return wq;
err:
	if (wq) {
		free_cwqs(wq);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
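
/*
 * Illustrative sketch (hypothetical code): callers normally reach this
 * function through the alloc_workqueue() wrapper.  A workqueue whose
 * items may run during memory reclaim passes WQ_MEM_RECLAIM so a rescuer
 * is created, and pairs allocation with destroy_workqueue() on teardown.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 1);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */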

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/* sanity check */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	if (wq->flags & WQ_RESCUER) {
		kthread_stop(wq->rescuer->task);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
	}

	free_cwqs(wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

/**
 * cwq_set_max_active - adjust max_active of a cwq
 * @cwq: target cpu_workqueue_struct
 * @max_active: new max_active value.
 *
 * Set @cwq->max_active to @max_active and activate delayed works if
 * increased.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
{
	cwq->max_active = max_active;

	while (!list_empty(&cwq->delayed_works) &&
	       cwq->nr_active < cwq->max_active)
		cwq_activate_first_delayed(cwq);
}

/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	unsigned int cpu;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	spin_lock(&workqueue_lock);

	wq->saved_max_active = max_active;

	for_each_cwq_cpu(cpu, wq) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		if (!(wq->flags & WQ_FREEZABLE) ||
		    !(gcwq->flags & GCWQ_FREEZING))
			cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
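
/*
 * Illustrative sketch (hypothetical code): max_active can be retuned at
 * runtime, e.g. limiting a degraded device to one in-flight item per CPU
 * and restoring the default later.  my_wq is an assumed workqueue.
 *
 *	workqueue_set_max_active(my_wq, 1);
 *	...
 *	workqueue_set_max_active(my_wq, WQ_DFL_ACTIVE);
 */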

/**
 * workqueue_congested - test whether a workqueue is congested
 * @cpu: CPU in question
 * @wq: target workqueue
 *
 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
 * no synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * RETURNS:
 * %true if congested, %false otherwise.
 */
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

	return !list_empty(&cwq->delayed_works);
}
EXPORT_SYMBOL_GPL(workqueue_congested);

/**
 * work_cpu - return the last known associated cpu for @work
 * @work: the work of interest
 *
 * RETURNS:
 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
 */
unsigned int work_cpu(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);

	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
}
EXPORT_SYMBOL_GPL(work_cpu);

/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running.  There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 * Especially for reentrant wqs, the pending state might hide the
 * running state.
 *
 * RETURNS:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long flags;
	unsigned int ret = 0;

	if (!gcwq)
		return 0;

	spin_lock_irqsave(&gcwq->lock, flags);

	if (work_pending(work))
		ret |= WORK_BUSY_PENDING;
	if (find_worker_executing_work(gcwq, work))
		ret |= WORK_BUSY_RUNNING;

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(work_busy);

/*
 * CPU hotplug.
 *
 * There are two challenges in supporting CPU hotplug.  Firstly, there
 * are a lot of assumptions on strong associations among work, cwq and
 * gcwq which make migrating pending and scheduled works very
 * difficult to implement without impacting hot paths.  Secondly,
 * gcwqs serve a mix of short, long and very long running works,
 * making blocked draining impractical.
 *
 * This is solved by allowing a gcwq to be disassociated from its CPU,
 * running as an unbound one, and allowing it to be reattached later if
 * the CPU comes back online.
 */

/* claim manager positions of all pools */
static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
{
	struct worker_pool *pool;

	for_each_worker_pool(pool, gcwq)
		mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
	spin_lock_irq(&gcwq->lock);
}

/* release manager positions */
static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
{
	struct worker_pool *pool;

	spin_unlock_irq(&gcwq->lock);
	for_each_worker_pool(pool, gcwq)
		mutex_unlock(&pool->assoc_mutex);
}

static void gcwq_unbind_fn(struct work_struct *work)
{
	struct global_cwq *gcwq = get_gcwq(smp_processor_id());
	struct worker_pool *pool;
	struct worker *worker;
	struct hlist_node *pos;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	gcwq_claim_assoc_and_lock(gcwq);

	/*
	 * We've claimed all manager positions.  Make all workers unbound
	 * and set DISASSOCIATED.  Before this, all workers except for the
	 * ones which are still executing works from before the last CPU
	 * down must be on the cpu.  After this, they may become diasporas.
	 */
	for_each_worker_pool(pool, gcwq)
		list_for_each_entry(worker, &pool->idle_list, entry)
			worker->flags |= WORKER_UNBOUND;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_UNBOUND;

	gcwq->flags |= GCWQ_DISASSOCIATED;

	gcwq_release_assoc_and_unlock(gcwq);

	/*
	 * Call schedule() so that we cross rq->lock and thus can guarantee
	 * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
	 * as scheduler callbacks may be invoked from other cpus.
	 */
	schedule();

	/*
	 * Sched callbacks are disabled now.  Zap nr_running.  After this,
	 * nr_running stays zero and need_more_worker() and keep_working()
	 * are always true as long as the worklist is not empty.  @gcwq now
	 * behaves as unbound (in terms of concurrency management) gcwq
	 * which is served by workers tied to the CPU.
	 *
	 * On return from this function, the current worker would trigger
	 * unbound chain execution of pending work items if other workers
	 * didn't already.
	 */
	for_each_worker_pool(pool, gcwq)
		atomic_set(get_pool_nr_running(pool), 0);
}

/*
 * Workqueues should be brought up before normal priority CPU notifiers.
 * This will be registered as a high priority CPU notifier.
 */
static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct worker_pool *pool;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		for_each_worker_pool(pool, gcwq) {
			struct worker *worker;

			if (pool->nr_workers)
				continue;

			worker = create_worker(pool);
			if (!worker)
				return NOTIFY_BAD;

			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			spin_unlock_irq(&gcwq->lock);
		}
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		gcwq_claim_assoc_and_lock(gcwq);
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		rebind_workers(gcwq);
		gcwq_release_assoc_and_unlock(gcwq);
		break;
	}
	return NOTIFY_OK;
}

/*
 * Workqueues should be brought down after normal priority CPU notifiers.
 * This will be registered as a low priority CPU notifier.
 */
static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct work_struct unbind_work;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/* unbinding should happen on the local CPU */
		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
		queue_work_on(cpu, system_highpri_wq, &unbind_work);
		flush_work(&unbind_work);
		break;
	}
	return NOTIFY_OK;
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static void work_for_cpu_fn(struct work_struct *work)
{
	struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);

	wfc->ret = wfc->fn(wfc->arg);
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct work_for_cpu wfc = { .fn = fn, .arg = arg };

	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
	schedule_work_on(cpu, &wfc.work);
	flush_work(&wfc.work);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
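
/*
 * Illustrative sketch (hypothetical code): work_on_cpu() suits rare,
 * slow-path operations that must execute on a particular CPU, e.g.
 * reading CPU-local hardware state; my_hw_read() is an assumed helper.
 *
 *	static long my_read_reg(void *arg)
 *	{
 *		return my_hw_read(smp_processor_id());
 *	}
 *
 *	...
 *	long val = work_on_cpu(cpu, my_read_reg, NULL);
 */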

#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all freezable
 * workqueues will queue new works to their frozen_works list instead of
 * gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (cwq && wq->flags & WQ_FREEZABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}

/**
 * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezable workqueues are still busy.  %false if freezing
 * is complete.
 */
bool freeze_workqueues_busy(void)
{
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_gcwq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}

/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq_set_max_active(cwq, wq->saved_max_active);
		}

		for_each_worker_pool(pool, gcwq)
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */

static int __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/* make sure we have enough bits for OFFQ CPU number */
	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
		     WORK_CPU_LAST);

	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;
		gcwq->flags |= GCWQ_DISASSOCIATED;

		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		for_each_worker_pool(pool, gcwq) {
			pool->gcwq = gcwq;
			INIT_LIST_HEAD(&pool->worklist);
			INIT_LIST_HEAD(&pool->idle_list);

			init_timer_deferrable(&pool->idle_timer);
			pool->idle_timer.function = idle_worker_timeout;
			pool->idle_timer.data = (unsigned long)pool;

			setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
				    (unsigned long)pool);

			mutex_init(&pool->assoc_mutex);
			ida_init(&pool->worker_ida);
		}
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;

		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;

		for_each_worker_pool(pool, gcwq) {
			struct worker *worker;

			worker = create_worker(pool);
			BUG_ON(!worker);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			spin_unlock_irq(&gcwq->lock);
		}
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
	       !system_unbound_wq || !system_freezable_wq);
	return 0;
}
early_initcall(init_workqueues);