/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>

#include "rcutree.h"
#include <trace/events/rcu.h>

#include "rcu.h"
/* Data structures. */

static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];

#define RCU_STATE_INITIALIZER(structname) { \
	.level = { &structname##_state.node[0] }, \
	.levelcnt = { \
		NUM_RCU_LVL_0,  /* root of hierarchy. */ \
		NUM_RCU_LVL_1, \
		NUM_RCU_LVL_2, \
		NUM_RCU_LVL_3, \
		NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
	}, \
	.fqs_state = RCU_GP_IDLE, \
	.gpnum = -300, \
	.completed = -300, \
	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
	.orphan_nxttail = &structname##_state.orphan_nxtlist, \
	.orphan_donetail = &structname##_state.orphan_donelist, \
	.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
	.n_force_qs = 0, \
	.n_force_qs_ngp = 0, \
	.name = #structname, \
}

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

static struct rcu_state *rcu_state;
/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

#ifdef CONFIG_RCU_BOOST

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/* State information for rcu_barrier() and friends. */
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}
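
/*
 * Worked example of the ->gpnum / ->completed convention used above:
 * both fields start at -300 (see RCU_STATE_INITIALIZER).  rcu_start_gp()
 * increments ->gpnum, so while a grace period is in progress ->gpnum is
 * one ahead of ->completed and rcu_gp_in_progress() returns true.
 * rcu_report_qs_rsp() later copies ->gpnum into ->completed, making the
 * two equal again until the next grace period starts.
 */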
/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
}

void rcu_bh_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled preemption.
 */
void rcu_note_context_switch(int cpu)
{
	trace_rcu_utilization("Start context switch");
	rcu_sched_qs(cpu);
	trace_rcu_utilization("End context switch");
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
};

static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
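
/*
 * Illustrative usage sketch for the module parameters above: this file is
 * normally built into the kernel, so the parameters are typically set on
 * the boot command line with a "rcutree." prefix, for example:
 *
 *	rcutree.blimit=20 rcutree.rcu_cpu_stall_timeout=60
 *
 * The two stall-warning parameters are declared with mode 0644, so they
 * can usually also be changed at run time, e.g.:
 *
 *	echo 1 > /sys/module/rcutree/parameters/rcu_cpu_stall_suppress
 */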
static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}

/*
 * Does the current CPU require a not-yet-started grace period?
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
}
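
/*
 * Illustrative sketch of the callback-list layout the two helpers above
 * rely on: each CPU's callbacks sit on one singly-linked ->nxtlist, and
 * the ->nxttail[] array points at the tail of four consecutive segments:
 *
 *	nxtlist --> [RCU_DONE_TAIL]	  grace period already ended
 *	        --> [RCU_WAIT_TAIL]	  waiting on the current grace period
 *	        --> [RCU_NEXT_READY_TAIL] waiting on the next grace period
 *	        --> [RCU_NEXT_TAIL]	  not yet assigned to a grace period
 *
 * An empty segment's tail pointer aliases the previous segment's tail, so
 * "&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]" tests for a non-empty
 * DONE segment, and a non-NULL "*rdp->nxttail[RCU_DONE_TAIL]" means there
 * are callbacks beyond the DONE segment that still need a grace period.
 */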
/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}
/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of going offline.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline for more than a jiffy, it is in a quiescent
	 * state.  We can trust its state not to change because interrupts
	 * are disabled.  The reason for the jiffy's worth of slack is to
	 * handle CPUs initializing on the way up and finding their way
	 * to the idle loop on the way down.
	 */
	if (cpu_is_offline(rdp->cpu) &&
	    ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
		rdp->offline_fqs++;
		return 1;
	}
	return 0;
}
/*
 * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
{
	trace_rcu_dyntick("Start", oldval, 0);
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_prepare_for_idle(smp_processor_id());
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic_inc();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);

	/*
	 * The idle task is not permitted to enter the idle loop while
	 * in an RCU read-side critical section.
	 */
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal idle entry in RCU read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
			   "Illegal idle entry in RCU-bh read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
			   "Illegal idle entry in RCU-sched read-side critical section.");
}
/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
		rdtp->dynticks_nesting = 0;
	else
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(rdtp, oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
	else
		rcu_idle_enter_common(rdtp, oldval);
	local_irq_restore(flags);
}
/*
 * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
{
	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic_inc();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle(smp_processor_id());
	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		trace_rcu_dyntick("Error on exit: not idle task",
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 * allow for the possibility of usermode upcalls messing up our count
 * of interrupt nesting level during the busy period that is just
 * now starting.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;
	long long oldval;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK)
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(rdtp, oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
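
/*
 * Minimal usage sketch for the two exported idle hooks above, assuming a
 * simplified architecture idle loop (the real callers live in the idle
 * and NO_HZ code, not in this file, and arch_cpu_sleep() is a made-up
 * placeholder for the low-power wait):
 *
 *	while (!need_resched()) {
 *		rcu_idle_enter();
 *		arch_cpu_sleep();
 *		rcu_idle_exit();
 *	}
 *
 * Between rcu_idle_enter() and rcu_idle_exit() RCU treats this CPU as
 * being in an extended quiescent state, so the calls must balance and no
 * RCU read-side critical section may span them.
 */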
/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
 * user mode!  This code assumes that the idle loop never does upcalls to
 * user mode.  If your architecture does do upcalls from the idle loop (or
 * does anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions), RCU will give you what you deserve, good
 * and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;
	long long oldval;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
	else
		rcu_idle_exit_common(rdtp, oldval);
	local_irq_restore(flags);
}
/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is active.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 &&
	    (atomic_read(&rdtp->dynticks) & 0x1))
		return;
	rdtp->dynticks_nmi_nesting++;
	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic_inc();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 ||
	    --rdtp->dynticks_nmi_nesting != 0)
		return;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic_inc();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic_inc();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
#ifdef CONFIG_PROVE_RCU

/**
 * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
 *
 * If the current CPU is in its idle loop and is neither in an interrupt
 * nor an NMI handler, return true.
 */
int rcu_is_cpu_idle(void)
{
	int ret;

	preempt_disable();
	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 * notifiers.
 *
 * This is also why RCU internally marks CPUs online during the
 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return 1;
	preempt_disable();
	rdp = &__get_cpu_var(rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rnp->qsmaskinit) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#endif /* #ifdef CONFIG_PROVE_RCU */
/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	return (rdp->dynticks_snap & 0x1) == 0;
}
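
/*
 * Worked example of the ->dynticks counter convention used by this
 * snapshot/recheck pair: the counter starts at 1 (see the DEFINE_PER_CPU
 * initializer earlier in this file) and is incremented on every idle
 * entry/exit and NMI entry/exit, so an even value means the CPU is in
 * dynticks-idle mode and an odd value means it is not.  A later recheck
 * that sees an even value, or any value at least two greater than the
 * snapshot, proves that the CPU has been in, or has passed through,
 * dynticks-idle mode since the snapshot and can therefore be credited
 * with an implicit quiescent state.
 */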
/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned int curr;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
		rdp->dynticks_fqs++;
		return 1;
	}

	/* Go check for the CPU being offline. */
	return rcu_implicit_offline_qs(rdp);
}
static int jiffies_till_stall_check(void)
{
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
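
/*
 * Worked example: with rcu_cpu_stall_timeout left at a (hypothetical)
 * CONFIG_RCU_CPU_STALL_TIMEOUT of 60 seconds and HZ=1000, the function
 * above returns 60 * 1000 + RCU_STALL_DELAY_DELTA jiffies.  A runtime
 * value outside the 3..300-second range is clamped and written back, so
 * for example setting the module parameter to 0 behaves as 3 seconds.
 */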
static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	rsp->gp_start = jiffies;
	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
}
static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		ndetected += rcu_print_task_stall(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu)) {
				print_cpu_stall_info(rsp, rnp->grplo + cpu);
				ndetected++;
			}
	}

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rnp = rcu_get_root(rsp);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	ndetected += rcu_print_task_stall(rnp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	print_cpu_stall_info_end();
	printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	if (ndetected == 0)
		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
	else if (!trigger_all_cpu_backtrace())
		dump_stack();

	/* If so configured, complain about tasks blocking the grace period. */
	rcu_print_detail_task_stall(rsp);

	force_quiescent_state(rsp, 0);  /* Kick them all. */
}
static void print_cpu_stall(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	printk(KERN_ERR "INFO: %s self-detected stall on CPU", rsp->name);
	print_cpu_stall_info_begin();
	print_cpu_stall_info(rsp, smp_processor_id());
	print_cpu_stall_info_end();
	printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
	if (!trigger_all_cpu_backtrace())
		dump_stack();

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
		rsp->jiffies_stall = jiffies +
				     3 * jiffies_till_stall_check() + 3;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	set_need_resched();  /* kick ourselves to get things going. */
}
static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if (rcu_cpu_stall_suppress)
		return;
	j = ACCESS_ONCE(jiffies);
	js = ACCESS_ONCE(rsp->jiffies_stall);
	rnp = rdp->mynode;
	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp);
	}
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}
/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
	rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
	rcu_preempt_stall_reset();
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static void __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
}
/*
 * Update CPU-local rcu_data state to record the newly noticed grace period.
 * This is used both when we started the grace period and when we notice
 * that someone else started the grace period.  The caller must hold the
 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
 * and must have irqs disabled.
 */
static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	if (rdp->gpnum != rnp->gpnum) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		rdp->gpnum = rnp->gpnum;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
		if (rnp->qsmask & rdp->grpmask) {
			rdp->qs_pending = 1;
			rdp->passed_quiesce = 0;
		} else
			rdp->qs_pending = 0;
		zero_cpu_stall_ticks(rdp);
	}
}

static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	__note_new_gpnum(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
 * Did someone else start a new RCU grace period since we last checked?
 * Update local state appropriately if so.  Must be called on the CPU
 * corresponding to rdp.
 */
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);
	if (rdp->gpnum != rsp->gpnum) {
		note_new_gpnum(rsp, rdp);
		ret = 1;
	}
	local_irq_restore(flags);
	return ret;
}
/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.  In addition, the corresponding leaf rcu_node structure's
 * ->lock must be held by the caller, with irqs disabled.
 */
static void
__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Did another grace period end? */
	if (rdp->completed != rnp->completed) {

		/* Advance callbacks.  No harm if list empty. */
		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");

		/*
		 * If we were in an extended quiescent state, we may have
		 * missed some grace periods that other CPUs handled on
		 * our behalf.  Catch up with this state to avoid noting
		 * spurious new grace periods.  If another grace period
		 * has started, then rnp->gpnum will have advanced, so
		 * we will detect this later on.
		 */
		if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
			rdp->gpnum = rdp->completed;

		/*
		 * If RCU does not need a quiescent state from this CPU,
		 * then make sure that this CPU doesn't go looking for one.
		 */
		if ((rnp->qsmask & rdp->grpmask) == 0)
			rdp->qs_pending = 0;
	}
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	__rcu_process_gp_end(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
 * Do per-CPU grace-period initialization for running CPU.  The caller
 * must hold the lock of the leaf rcu_node structure corresponding to
 * this CPU.
 */
static void
rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Prior grace period ended, so advance callbacks for current CPU. */
	__rcu_process_gp_end(rsp, rnp, rdp);

	/*
	 * Because this CPU just now started the new grace period, we know
	 * that all of its callbacks will be covered by this upcoming grace
	 * period, even the ones that were registered arbitrarily recently.
	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
	 *
	 * Other CPUs cannot be sure exactly when the grace period started.
	 * Therefore, their recently registered callbacks must pass through
	 * an additional RCU_NEXT_READY stage, so that they will be handled
	 * by the next RCU grace period.
	 */
	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

	/* Set state so that this CPU will detect the next quiescent state. */
	__note_new_gpnum(rsp, rnp, rdp);
}
/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock, which is released before return.  Hard irqs must
 * be disabled.
 *
 * Note that it is legal for a dying CPU (which is marked as offline) to
 * invoke this function.  This can happen when the dying CPU reports its
 * quiescent state.
 */
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
	struct rcu_node *rnp = rcu_get_root(rsp);

	if (!rcu_scheduler_fully_active ||
	    !cpu_needs_another_gp(rsp, rdp)) {
		/*
		 * Either the scheduler hasn't yet spawned the first
		 * non-idle task or this CPU does not need another
		 * grace period.  Either way, don't start a new grace
		 * period.
		 */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	if (rsp->fqs_active) {
		/*
		 * This CPU needs a grace period, but force_quiescent_state()
		 * is running.  Tell it to start one on this CPU's behalf.
		 */
		rsp->fqs_need_gp = 1;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	/* Advance to a new grace period and initialize state. */
	rsp->gpnum++;
	trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
	WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
	rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	record_gp_stall_check_time(rsp);
	raw_spin_unlock(&rnp->lock);  /* leave irqs disabled. */

	/* Exclude any concurrent CPU-hotplug operations. */
	raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure.  This
	 * operation relies on the layout of the hierarchy within the
	 * rsp->node[] array.  Note that other CPUs will access only
	 * the leaves of the hierarchy, which still indicate that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized.  In addition, we have excluded
	 * CPU-hotplug operations.
	 *
	 * Note that the grace period cannot complete until we finish
	 * the initialization process, as there will be at least one
	 * qsmask bit set in the root node until that time, namely the
	 * one corresponding to this CPU, due to the fact that we have
	 * irqs disabled.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		rnp->completed = rsp->completed;
		if (rnp == rdp->mynode)
			rcu_start_gp_per_cpu(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	}

	rnp = rcu_get_root(rsp);
	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
	rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}
/*
 * Report a full set of quiescent states to the specified rcu_state
 * data structure.  This involves cleaning up after the prior grace
 * period and letting rcu_start_gp() start up the next grace period
 * if one is needed.  Note that the caller must hold rnp->lock, as
 * required by rcu_start_gp(), which will release it.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	unsigned long gp_duration;
	struct rcu_node *rnp = rcu_get_root(rsp);
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));

	/*
	 * Ensure that all grace-period and pre-grace-period activity
	 * is seen before the assignment to rsp->completed.
	 */
	smp_mb(); /* See above block comment. */
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 *
	 * But if this CPU needs another grace period, it will take
	 * care of this while initializing the next grace period.
	 * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
	 * because the callbacks have not yet been advanced: Those
	 * callbacks are waiting on the grace period that just now
	 * completed.
	 */
	if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */

		/*
		 * Propagate new ->completed value to rcu_node structures
		 * so that other CPUs don't have to wait until the start
		 * of the next grace period to process their callbacks.
		 */
		rcu_for_each_node_breadth_first(rsp, rnp) {
			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
			rnp->completed = rsp->gpnum;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		rnp = rcu_get_root(rsp);
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
	}

	rsp->completed = rsp->gpnum;  /* Declare the grace period complete. */
	trace_rcu_grace_period(rsp->name, rsp->completed, "end");
	rsp->fqs_state = RCU_GP_IDLE;
	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
}
/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be
 * a leaf rcu_node structure, though it often will be).  That structure's
 * lock must be held upon entry, and it is released before return.
 */
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		  struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct rcu_node *rnp_c;

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if (!(rnp->qsmask & mask)) {

			/* Our bit has already been cleared, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		rnp->qsmask &= ~mask;
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */
			break;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		WARN_ON_ONCE(rnp_c->qsmask);
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}
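
/*
 * Worked example of the upward walk above, assuming a hypothetical
 * two-level tree with 16 CPUs per leaf rcu_node: when CPU 5 reports the
 * last outstanding quiescent state of its leaf, the loop clears CPU 5's
 * bit, sees the leaf's ->qsmask drop to zero, and retries one level up
 * with mask set to that leaf's ->grpmask in the root node.  If the root's
 * ->qsmask also drops to zero, the loop breaks with the root lock held
 * and rcu_report_qs_rsp() ends the grace period.
 */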
/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be either called from the specified CPU, or
 * called when the specified CPU is known to be offline (and when it is
 * also known that no other CPU is concurrently trying to help the offline
 * CPU).  The lastgp argument is used to make sure we are still in the
 * grace period of interest.  We don't want to end the current grace period
 * based on quiescent states detected in an earlier grace period!
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {

		/*
		 * The grace period in which this quiescent state was
		 * recorded has ended, so don't report it upwards.
		 * We will instead need a new quiescent state that lies
		 * within the current grace period.
		 */
		rdp->passed_quiesce = 0;	/* need qs for new gp. */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rdp->qs_pending = 0;

		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
	}
}
  1149. /*
  1150. * Check to see if there is a new grace period of which this CPU
  1151. * is not yet aware, and if so, set up local rcu_data state for it.
  1152. * Otherwise, see if this CPU has just passed through its first
  1153. * quiescent state for this grace period, and record that fact if so.
  1154. */
  1155. static void
  1156. rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  1157. {
  1158. /* If there is now a new grace period, record and return. */
  1159. if (check_for_new_grace_period(rsp, rdp))
  1160. return;
  1161. /*
  1162. * Does this CPU still need to do its part for current grace period?
  1163. * If no, return and let the other CPUs do their part as well.
  1164. */
  1165. if (!rdp->qs_pending)
  1166. return;
  1167. /*
  1168. * Was there a quiescent state since the beginning of the grace
  1169. * period? If no, then exit and wait for the next call.
  1170. */
  1171. if (!rdp->passed_quiesce)
  1172. return;
  1173. /*
  1174. * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  1175. * judge of that).
  1176. */
  1177. rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
  1178. }
  1179. #ifdef CONFIG_HOTPLUG_CPU
  1180. /*
  1181. * Send the specified CPU's RCU callbacks to the orphanage. The
  1182. * specified CPU must be offline, and the caller must hold the
  1183. * ->onofflock.
  1184. */
  1185. static void
  1186. rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  1187. struct rcu_node *rnp, struct rcu_data *rdp)
  1188. {
  1189. int i;
  1190. /*
  1191. * Orphan the callbacks. First adjust the counts. This is safe
  1192. * because ->onofflock excludes _rcu_barrier()'s adoption of
  1193. * the callbacks, thus no memory barrier is required.
  1194. */
  1195. if (rdp->nxtlist != NULL) {
  1196. rsp->qlen_lazy += rdp->qlen_lazy;
  1197. rsp->qlen += rdp->qlen;
  1198. rdp->n_cbs_orphaned += rdp->qlen;
  1199. rdp->qlen_lazy = 0;
  1200. rdp->qlen = 0;
  1201. }
  1202. /*
  1203. * Next, move those callbacks still needing a grace period to
  1204. * the orphanage, where some other CPU will pick them up.
  1205. * Some of the callbacks might have gone partway through a grace
  1206. * period, but that is too bad. They get to start over because we
  1207. * cannot assume that grace periods are synchronized across CPUs.
1208. * We don't bother updating the ->nxttail[] array yet; instead
  1209. * we just reset the whole thing later on.
  1210. */
  1211. if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
  1212. *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
  1213. rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
  1214. *rdp->nxttail[RCU_DONE_TAIL] = NULL;
  1215. }
  1216. /*
  1217. * Then move the ready-to-invoke callbacks to the orphanage,
  1218. * where some other CPU will pick them up. These will not be
1219. * required to pass through another grace period: They are done.
  1220. */
  1221. if (rdp->nxtlist != NULL) {
  1222. *rsp->orphan_donetail = rdp->nxtlist;
  1223. rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
  1224. }
  1225. /* Finally, initialize the rcu_data structure's list to empty. */
  1226. rdp->nxtlist = NULL;
  1227. for (i = 0; i < RCU_NEXT_SIZE; i++)
  1228. rdp->nxttail[i] = &rdp->nxtlist;
  1229. }
  1230. /*
  1231. * Adopt the RCU callbacks from the specified rcu_state structure's
  1232. * orphanage. The caller must hold the ->onofflock.
  1233. */
  1234. static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
  1235. {
  1236. int i;
  1237. struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
  1238. /*
  1239. * If there is an rcu_barrier() operation in progress, then
  1240. * only the task doing that operation is permitted to adopt
  1241. * callbacks. To do otherwise breaks rcu_barrier() and friends
  1242. * by causing them to fail to wait for the callbacks in the
  1243. * orphanage.
  1244. */
  1245. if (rsp->rcu_barrier_in_progress &&
  1246. rsp->rcu_barrier_in_progress != current)
  1247. return;
  1248. /* Do the accounting first. */
  1249. rdp->qlen_lazy += rsp->qlen_lazy;
  1250. rdp->qlen += rsp->qlen;
  1251. rdp->n_cbs_adopted += rsp->qlen;
  1252. rsp->qlen_lazy = 0;
  1253. rsp->qlen = 0;
  1254. /*
  1255. * We do not need a memory barrier here because the only way we
  1256. * can get here if there is an rcu_barrier() in flight is if
  1257. * we are the task doing the rcu_barrier().
  1258. */
  1259. /* First adopt the ready-to-invoke callbacks. */
  1260. if (rsp->orphan_donelist != NULL) {
  1261. *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
  1262. *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
  1263. for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
  1264. if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
  1265. rdp->nxttail[i] = rsp->orphan_donetail;
  1266. rsp->orphan_donelist = NULL;
  1267. rsp->orphan_donetail = &rsp->orphan_donelist;
  1268. }
  1269. /* And then adopt the callbacks that still need a grace period. */
  1270. if (rsp->orphan_nxtlist != NULL) {
  1271. *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
  1272. rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
  1273. rsp->orphan_nxtlist = NULL;
  1274. rsp->orphan_nxttail = &rsp->orphan_nxtlist;
  1275. }
  1276. }
  1277. /*
  1278. * Trace the fact that this CPU is going offline.
  1279. */
  1280. static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  1281. {
  1282. RCU_TRACE(unsigned long mask);
  1283. RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
  1284. RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
  1285. RCU_TRACE(mask = rdp->grpmask);
  1286. trace_rcu_grace_period(rsp->name,
  1287. rnp->gpnum + 1 - !!(rnp->qsmask & mask),
  1288. "cpuofl");
  1289. }
  1290. /*
  1291. * The CPU has been completely removed, and some other CPU is reporting
  1292. * this fact from process context. Do the remainder of the cleanup,
  1293. * including orphaning the outgoing CPU's RCU callbacks, and also
  1294. * adopting them, if there is no _rcu_barrier() instance running.
  1295. * There can only be one CPU hotplug operation at a time, so no other
  1296. * CPU can be attempting to update rcu_cpu_kthread_task.
  1297. */
  1298. static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
  1299. {
  1300. unsigned long flags;
  1301. unsigned long mask;
  1302. int need_report = 0;
  1303. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  1304. struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
  1305. /* Adjust any no-longer-needed kthreads. */
  1306. rcu_stop_cpu_kthread(cpu);
  1307. rcu_node_kthread_setaffinity(rnp, -1);
  1308. /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
  1309. /* Exclude any attempts to start a new grace period. */
  1310. raw_spin_lock_irqsave(&rsp->onofflock, flags);
  1311. /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
  1312. rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
  1313. rcu_adopt_orphan_cbs(rsp);
  1314. /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
  1315. mask = rdp->grpmask; /* rnp->grplo is constant. */
  1316. do {
  1317. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  1318. rnp->qsmaskinit &= ~mask;
  1319. if (rnp->qsmaskinit != 0) {
  1320. if (rnp != rdp->mynode)
  1321. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1322. break;
  1323. }
  1324. if (rnp == rdp->mynode)
  1325. need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
  1326. else
  1327. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1328. mask = rnp->grpmask;
  1329. rnp = rnp->parent;
  1330. } while (rnp != NULL);
  1331. /*
  1332. * We still hold the leaf rcu_node structure lock here, and
  1333. * irqs are still disabled. The reason for this subterfuge is
1334. * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
  1335. * held leads to deadlock.
  1336. */
  1337. raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
  1338. rnp = rdp->mynode;
  1339. if (need_report & RCU_OFL_TASKS_NORM_GP)
  1340. rcu_report_unblock_qs_rnp(rnp, flags);
  1341. else
  1342. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1343. if (need_report & RCU_OFL_TASKS_EXP_GP)
  1344. rcu_report_exp_rnp(rsp, rnp, true);
  1345. }
  1346. #else /* #ifdef CONFIG_HOTPLUG_CPU */
  1347. static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
  1348. {
  1349. }
  1350. static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  1351. {
  1352. }
  1353. static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
  1354. {
  1355. }
  1356. #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
  1357. /*
  1358. * Invoke any RCU callbacks that have made it to the end of their grace
1359. * period. Throttle as specified by rdp->blimit.
  1360. */
  1361. static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  1362. {
  1363. unsigned long flags;
  1364. struct rcu_head *next, *list, **tail;
  1365. int bl, count, count_lazy;
1366. /* If no callbacks are ready, just return. */
  1367. if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
  1368. trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
  1369. trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
  1370. need_resched(), is_idle_task(current),
  1371. rcu_is_callbacks_kthread());
  1372. return;
  1373. }
  1374. /*
1375. * Extract the list of ready callbacks, disabling interrupts to prevent
  1376. * races with call_rcu() from interrupt handlers.
  1377. */
  1378. local_irq_save(flags);
  1379. WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
  1380. bl = rdp->blimit;
  1381. trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
  1382. list = rdp->nxtlist;
  1383. rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
  1384. *rdp->nxttail[RCU_DONE_TAIL] = NULL;
  1385. tail = rdp->nxttail[RCU_DONE_TAIL];
  1386. for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
  1387. if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
  1388. rdp->nxttail[count] = &rdp->nxtlist;
  1389. local_irq_restore(flags);
  1390. /* Invoke callbacks. */
  1391. count = count_lazy = 0;
  1392. while (list) {
  1393. next = list->next;
  1394. prefetch(next);
  1395. debug_rcu_head_unqueue(list);
  1396. if (__rcu_reclaim(rsp->name, list))
  1397. count_lazy++;
  1398. list = next;
  1399. /* Stop only if limit reached and CPU has something to do. */
  1400. if (++count >= bl &&
  1401. (need_resched() ||
  1402. (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
  1403. break;
  1404. }
  1405. local_irq_save(flags);
  1406. trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
  1407. is_idle_task(current),
  1408. rcu_is_callbacks_kthread());
  1409. /* Update count, and requeue any remaining callbacks. */
  1410. if (list != NULL) {
  1411. *tail = rdp->nxtlist;
  1412. rdp->nxtlist = list;
  1413. for (count = 0; count < RCU_NEXT_SIZE; count++)
  1414. if (&rdp->nxtlist == rdp->nxttail[count])
  1415. rdp->nxttail[count] = tail;
  1416. else
  1417. break;
  1418. }
  1419. smp_mb(); /* List handling before counting for rcu_barrier(). */
  1420. rdp->qlen_lazy -= count_lazy;
  1421. rdp->qlen -= count;
  1422. rdp->n_cbs_invoked += count;
  1423. /* Reinstate batch limit if we have worked down the excess. */
  1424. if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
  1425. rdp->blimit = blimit;
  1426. /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  1427. if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
  1428. rdp->qlen_last_fqs_check = 0;
  1429. rdp->n_force_qs_snap = rsp->n_force_qs;
  1430. } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  1431. rdp->qlen_last_fqs_check = rdp->qlen;
  1432. local_irq_restore(flags);
  1433. /* Re-invoke RCU core processing if there are callbacks remaining. */
  1434. if (cpu_has_callbacks_ready_to_invoke(rdp))
  1435. invoke_rcu_core();
  1436. }
  1437. /*
  1438. * Check to see if this CPU is in a non-context-switch quiescent state
  1439. * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
  1440. * Also schedule RCU core processing.
  1441. *
  1442. * This function must be called from hardirq context. It is normally
  1443. * invoked from the scheduling-clock interrupt. If rcu_pending returns
  1444. * false, there is no point in invoking rcu_check_callbacks().
  1445. */
  1446. void rcu_check_callbacks(int cpu, int user)
  1447. {
  1448. trace_rcu_utilization("Start scheduler-tick");
  1449. increment_cpu_stall_ticks();
  1450. if (user || rcu_is_cpu_rrupt_from_idle()) {
  1451. /*
  1452. * Get here if this CPU took its interrupt from user
  1453. * mode or from the idle loop, and if this is not a
  1454. * nested interrupt. In this case, the CPU is in
  1455. * a quiescent state, so note it.
  1456. *
  1457. * No memory barrier is required here because both
  1458. * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  1459. * variables that other CPUs neither access nor modify,
  1460. * at least not while the corresponding CPU is online.
  1461. */
  1462. rcu_sched_qs(cpu);
  1463. rcu_bh_qs(cpu);
  1464. } else if (!in_softirq()) {
  1465. /*
  1466. * Get here if this CPU did not take its interrupt from
  1467. * softirq, in other words, if it is not interrupting
  1468. * a rcu_bh read-side critical section. This is an _bh
  1469. * critical section, so note it.
  1470. */
  1471. rcu_bh_qs(cpu);
  1472. }
  1473. rcu_preempt_check_callbacks(cpu);
  1474. if (rcu_pending(cpu))
  1475. invoke_rcu_core();
  1476. trace_rcu_utilization("End scheduler-tick");
  1477. }
  1478. /*
  1479. * Scan the leaf rcu_node structures, processing dyntick state for any that
  1480. * have not yet encountered a quiescent state, using the function specified.
  1481. * Also initiate boosting for any threads blocked on the root rcu_node.
  1482. *
  1483. * The caller must have suppressed start of new grace periods.
  1484. */
  1485. static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
  1486. {
  1487. unsigned long bit;
  1488. int cpu;
  1489. unsigned long flags;
  1490. unsigned long mask;
  1491. struct rcu_node *rnp;
  1492. rcu_for_each_leaf_node(rsp, rnp) {
  1493. mask = 0;
  1494. raw_spin_lock_irqsave(&rnp->lock, flags);
  1495. if (!rcu_gp_in_progress(rsp)) {
  1496. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1497. return;
  1498. }
  1499. if (rnp->qsmask == 0) {
  1500. rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
  1501. continue;
  1502. }
  1503. cpu = rnp->grplo;
  1504. bit = 1;
  1505. for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
  1506. if ((rnp->qsmask & bit) != 0 &&
  1507. f(per_cpu_ptr(rsp->rda, cpu)))
  1508. mask |= bit;
  1509. }
  1510. if (mask != 0) {
  1511. /* rcu_report_qs_rnp() releases rnp->lock. */
  1512. rcu_report_qs_rnp(mask, rsp, rnp, flags);
  1513. continue;
  1514. }
  1515. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1516. }
  1517. rnp = rcu_get_root(rsp);
  1518. if (rnp->qsmask == 0) {
  1519. raw_spin_lock_irqsave(&rnp->lock, flags);
  1520. rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
  1521. }
  1522. }
  1523. /*
  1524. * Force quiescent states on reluctant CPUs, and also detect which
  1525. * CPUs are in dyntick-idle mode.
  1526. */
  1527. static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
  1528. {
  1529. unsigned long flags;
  1530. struct rcu_node *rnp = rcu_get_root(rsp);
  1531. trace_rcu_utilization("Start fqs");
  1532. if (!rcu_gp_in_progress(rsp)) {
  1533. trace_rcu_utilization("End fqs");
  1534. return; /* No grace period in progress, nothing to force. */
  1535. }
  1536. if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
  1537. rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
  1538. trace_rcu_utilization("End fqs");
  1539. return; /* Someone else is already on the job. */
  1540. }
  1541. if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
  1542. goto unlock_fqs_ret; /* no emergency and done recently. */
  1543. rsp->n_force_qs++;
  1544. raw_spin_lock(&rnp->lock); /* irqs already disabled */
  1545. rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1546. if (!rcu_gp_in_progress(rsp)) {
  1547. rsp->n_force_qs_ngp++;
  1548. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1549. goto unlock_fqs_ret; /* no GP in progress, time updated. */
  1550. }
  1551. rsp->fqs_active = 1;
  1552. switch (rsp->fqs_state) {
  1553. case RCU_GP_IDLE:
  1554. case RCU_GP_INIT:
  1555. break; /* grace period idle or initializing, ignore. */
  1556. case RCU_SAVE_DYNTICK:
  1557. if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
  1558. break; /* So gcc recognizes the dead code. */
  1559. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1560. /* Record dyntick-idle state. */
  1561. force_qs_rnp(rsp, dyntick_save_progress_counter);
  1562. raw_spin_lock(&rnp->lock); /* irqs already disabled */
  1563. if (rcu_gp_in_progress(rsp))
  1564. rsp->fqs_state = RCU_FORCE_QS;
  1565. break;
  1566. case RCU_FORCE_QS:
1567. /* Check dyntick-idle state, send IPI to laggards. */
  1568. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1569. force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
  1570. /* Leave state in case more forcing is required. */
  1571. raw_spin_lock(&rnp->lock); /* irqs already disabled */
  1572. break;
  1573. }
  1574. rsp->fqs_active = 0;
  1575. if (rsp->fqs_need_gp) {
  1576. raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
  1577. rsp->fqs_need_gp = 0;
  1578. rcu_start_gp(rsp, flags); /* releases rnp->lock */
  1579. trace_rcu_utilization("End fqs");
  1580. return;
  1581. }
  1582. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1583. unlock_fqs_ret:
  1584. raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
  1585. trace_rcu_utilization("End fqs");
  1586. }
  1587. /*
  1588. * This does the RCU core processing work for the specified rcu_state
  1589. * and rcu_data structures. This may be called only from the CPU to
  1590. * whom the rdp belongs.
  1591. */
  1592. static void
  1593. __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  1594. {
  1595. unsigned long flags;
  1596. WARN_ON_ONCE(rdp->beenonline == 0);
  1597. /*
  1598. * If an RCU GP has gone long enough, go check for dyntick
  1599. * idle CPUs and, if needed, send resched IPIs.
  1600. */
  1601. if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
  1602. force_quiescent_state(rsp, 1);
  1603. /*
  1604. * Advance callbacks in response to end of earlier grace
  1605. * period that some other CPU ended.
  1606. */
  1607. rcu_process_gp_end(rsp, rdp);
  1608. /* Update RCU state based on any recent quiescent states. */
  1609. rcu_check_quiescent_state(rsp, rdp);
  1610. /* Does this CPU require a not-yet-started grace period? */
  1611. if (cpu_needs_another_gp(rsp, rdp)) {
  1612. raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
  1613. rcu_start_gp(rsp, flags); /* releases above lock */
  1614. }
  1615. /* If there are callbacks ready, invoke them. */
  1616. if (cpu_has_callbacks_ready_to_invoke(rdp))
  1617. invoke_rcu_callbacks(rsp, rdp);
  1618. }
  1619. /*
  1620. * Do RCU core processing for the current CPU.
  1621. */
  1622. static void rcu_process_callbacks(struct softirq_action *unused)
  1623. {
  1624. trace_rcu_utilization("Start RCU core");
  1625. __rcu_process_callbacks(&rcu_sched_state,
  1626. &__get_cpu_var(rcu_sched_data));
  1627. __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
  1628. rcu_preempt_process_callbacks();
  1629. trace_rcu_utilization("End RCU core");
  1630. }
  1631. /*
  1632. * Schedule RCU callback invocation. If the specified type of RCU
  1633. * does not support RCU priority boosting, just do a direct call,
  1634. * otherwise wake up the per-CPU kernel kthread. Note that because we
  1635. * are running on the current CPU with interrupts disabled, the
  1636. * rcu_cpu_kthread_task cannot disappear out from under us.
  1637. */
  1638. static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  1639. {
  1640. if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
  1641. return;
  1642. if (likely(!rsp->boost)) {
  1643. rcu_do_batch(rsp, rdp);
  1644. return;
  1645. }
  1646. invoke_rcu_callbacks_kthread();
  1647. }
  1648. static void invoke_rcu_core(void)
  1649. {
  1650. raise_softirq(RCU_SOFTIRQ);
  1651. }
  1652. static void
  1653. __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
  1654. struct rcu_state *rsp, bool lazy)
  1655. {
  1656. unsigned long flags;
  1657. struct rcu_data *rdp;
  1658. WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
  1659. debug_rcu_head_queue(head);
  1660. head->func = func;
  1661. head->next = NULL;
  1662. smp_mb(); /* Ensure RCU update seen before callback registry. */
  1663. /*
  1664. * Opportunistically note grace-period endings and beginnings.
  1665. * Note that we might see a beginning right after we see an
  1666. * end, but never vice versa, since this CPU has to pass through
  1667. * a quiescent state betweentimes.
  1668. */
  1669. local_irq_save(flags);
  1670. rdp = this_cpu_ptr(rsp->rda);
  1671. /* Add the callback to our list. */
  1672. rdp->qlen++;
  1673. if (lazy)
  1674. rdp->qlen_lazy++;
  1675. else
  1676. rcu_idle_count_callbacks_posted();
  1677. smp_mb(); /* Count before adding callback for rcu_barrier(). */
  1678. *rdp->nxttail[RCU_NEXT_TAIL] = head;
  1679. rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
  1680. if (__is_kfree_rcu_offset((unsigned long)func))
  1681. trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
  1682. rdp->qlen_lazy, rdp->qlen);
  1683. else
  1684. trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
  1685. /* If interrupts were disabled, don't dive into RCU core. */
  1686. if (irqs_disabled_flags(flags)) {
  1687. local_irq_restore(flags);
  1688. return;
  1689. }
  1690. /*
  1691. * Force the grace period if too many callbacks or too long waiting.
  1692. * Enforce hysteresis, and don't invoke force_quiescent_state()
  1693. * if some other CPU has recently done so. Also, don't bother
  1694. * invoking force_quiescent_state() if the newly enqueued callback
  1695. * is the only one waiting for a grace period to complete.
  1696. */
  1697. if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
  1698. /* Are we ignoring a completed grace period? */
  1699. rcu_process_gp_end(rsp, rdp);
  1700. check_for_new_grace_period(rsp, rdp);
  1701. /* Start a new grace period if one not already started. */
  1702. if (!rcu_gp_in_progress(rsp)) {
  1703. unsigned long nestflag;
  1704. struct rcu_node *rnp_root = rcu_get_root(rsp);
  1705. raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
  1706. rcu_start_gp(rsp, nestflag); /* rlses rnp_root->lock */
  1707. } else {
  1708. /* Give the grace period a kick. */
  1709. rdp->blimit = LONG_MAX;
  1710. if (rsp->n_force_qs == rdp->n_force_qs_snap &&
  1711. *rdp->nxttail[RCU_DONE_TAIL] != head)
  1712. force_quiescent_state(rsp, 0);
  1713. rdp->n_force_qs_snap = rsp->n_force_qs;
  1714. rdp->qlen_last_fqs_check = rdp->qlen;
  1715. }
  1716. } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
  1717. force_quiescent_state(rsp, 1);
  1718. local_irq_restore(flags);
  1719. }
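/*
 * Illustrative sketch (not part of rcutree.c): the enqueue at
 * ->nxttail[RCU_NEXT_TAIL] above relies on the segmented-list invariant
 * that each ->nxttail[] entry points at the ->next field of the last
 * callback in its segment, or at &->nxtlist when the segment is empty,
 * so appending never walks the list.  A minimal stand-alone rendering
 * of that append step, assuming only the struct rcu_head layout:
 */
static void segment_append(struct rcu_head ***tailp, struct rcu_head *rhp)
{
	rhp->next = NULL;	/* New callback becomes the last element. */
	**tailp = rhp;		/* Link it after the current segment tail. */
	*tailp = &rhp->next;	/* Tail now addresses the new ->next field. */
}
/*
 * Invoked as segment_append(&rdp->nxttail[RCU_NEXT_TAIL], head), this
 * performs the same stores as the enqueue in __call_rcu() above.
 */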
  1720. /*
  1721. * Queue an RCU-sched callback for invocation after a grace period.
  1722. */
  1723. void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
  1724. {
  1725. __call_rcu(head, func, &rcu_sched_state, 0);
  1726. }
  1727. EXPORT_SYMBOL_GPL(call_rcu_sched);
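/*
 * Illustrative usage sketch (not part of rcutree.c): a typical
 * call_rcu_sched() caller embeds an rcu_head in its own structure and
 * frees the enclosing object from the callback once a grace period has
 * elapsed.  The names struct foo, foo_reclaim(), and foo_release() are
 * hypothetical, used only for this example.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);
}

static void foo_release(struct foo *fp)
{
	/* fp must already be unreachable from any RCU-protected pointer. */
	call_rcu_sched(&fp->rcu, foo_reclaim);
}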
  1728. /*
  1729. * Queue an RCU callback for invocation after a quicker grace period.
  1730. */
  1731. void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
  1732. {
  1733. __call_rcu(head, func, &rcu_bh_state, 0);
  1734. }
  1735. EXPORT_SYMBOL_GPL(call_rcu_bh);
  1736. /*
  1737. * Because a context switch is a grace period for RCU-sched and RCU-bh,
  1738. * any blocking grace-period wait automatically implies a grace period
1739. * if there is only one CPU online at any point in time during execution
  1740. * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
  1741. * occasionally incorrectly indicate that there are multiple CPUs online
  1742. * when there was in fact only one the whole time, as this just adds
  1743. * some overhead: RCU still operates correctly.
  1744. *
  1745. * Of course, sampling num_online_cpus() with preemption enabled can
  1746. * give erroneous results if there are concurrent CPU-hotplug operations.
  1747. * For example, given a demonic sequence of preemptions in num_online_cpus()
  1748. * and CPU-hotplug operations, there could be two or more CPUs online at
  1749. * all times, but num_online_cpus() might well return one (or even zero).
  1750. *
  1751. * However, all such demonic sequences require at least one CPU-offline
  1752. * operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer
  1753. * is only a problem if there is an RCU read-side critical section executing
  1754. * throughout. But RCU-sched and RCU-bh read-side critical sections
  1755. * disable either preemption or bh, which prevents a CPU from going offline.
  1756. * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
  1757. * that there is only one CPU when in fact there was more than one throughout
  1758. * is when there were no RCU readers in the system. If there are no
  1759. * RCU readers, the grace period by definition can be of zero length,
  1760. * regardless of the number of online CPUs.
  1761. */
  1762. static inline int rcu_blocking_is_gp(void)
  1763. {
  1764. might_sleep(); /* Check for RCU read-side critical section. */
  1765. return num_online_cpus() <= 1;
  1766. }
  1767. /**
  1768. * synchronize_sched - wait until an rcu-sched grace period has elapsed.
  1769. *
  1770. * Control will return to the caller some time after a full rcu-sched
  1771. * grace period has elapsed, in other words after all currently executing
  1772. * rcu-sched read-side critical sections have completed. These read-side
  1773. * critical sections are delimited by rcu_read_lock_sched() and
  1774. * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
  1775. * local_irq_disable(), and so on may be used in place of
  1776. * rcu_read_lock_sched().
  1777. *
  1778. * This means that all preempt_disable code sequences, including NMI and
  1779. * hardware-interrupt handlers, in progress on entry will have completed
  1780. * before this primitive returns. However, this does not guarantee that
  1781. * softirq handlers will have completed, since in some kernels, these
  1782. * handlers can run in process context, and can block.
  1783. *
  1784. * This primitive provides the guarantees made by the (now removed)
  1785. * synchronize_kernel() API. In contrast, synchronize_rcu() only
  1786. * guarantees that rcu_read_lock() sections will have completed.
  1787. * In "classic RCU", these two guarantees happen to be one and
  1788. * the same, but can differ in realtime RCU implementations.
  1789. */
  1790. void synchronize_sched(void)
  1791. {
  1792. rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
  1793. !lock_is_held(&rcu_lock_map) &&
  1794. !lock_is_held(&rcu_sched_lock_map),
  1795. "Illegal synchronize_sched() in RCU-sched read-side critical section");
  1796. if (rcu_blocking_is_gp())
  1797. return;
  1798. wait_rcu_gp(call_rcu_sched);
  1799. }
  1800. EXPORT_SYMBOL_GPL(synchronize_sched);
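/*
 * Illustrative usage sketch (not part of rcutree.c): a blocking updater
 * unlinks an element, waits for all pre-existing preempt-disabled or
 * rcu_read_lock_sched() readers with synchronize_sched(), and only then
 * frees the element.  struct bar, bar_lock, and bar_remove_and_free()
 * are hypothetical names.
 */
struct bar {
	struct list_head list;
	int data;
};

static DEFINE_SPINLOCK(bar_lock);

static void bar_remove_and_free(struct bar *bp)
{
	spin_lock(&bar_lock);
	list_del_rcu(&bp->list);	/* New readers can no longer find bp. */
	spin_unlock(&bar_lock);
	synchronize_sched();		/* Wait out all pre-existing readers. */
	kfree(bp);			/* No reader can still reference bp. */
}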
  1801. /**
  1802. * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  1803. *
  1804. * Control will return to the caller some time after a full rcu_bh grace
  1805. * period has elapsed, in other words after all currently executing rcu_bh
  1806. * read-side critical sections have completed. RCU read-side critical
  1807. * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  1808. * and may be nested.
  1809. */
  1810. void synchronize_rcu_bh(void)
  1811. {
  1812. rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
  1813. !lock_is_held(&rcu_lock_map) &&
  1814. !lock_is_held(&rcu_sched_lock_map),
  1815. "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
  1816. if (rcu_blocking_is_gp())
  1817. return;
  1818. wait_rcu_gp(call_rcu_bh);
  1819. }
  1820. EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
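/*
 * Illustrative usage sketch (not part of rcutree.c): an RCU-bh updater
 * publishes a replacement pointer and waits for all rcu_read_lock_bh()
 * readers (for example, packet-processing softirq code) before freeing
 * the old version.  struct example_conf and global_conf are hypothetical;
 * the caller is assumed to hold whatever mutex serializes updaters.
 */
struct example_conf {
	int setting;
};

static struct example_conf __rcu *global_conf;

static void example_update_conf(struct example_conf *new_conf)
{
	struct example_conf *old_conf = rcu_dereference_raw(global_conf);

	rcu_assign_pointer(global_conf, new_conf);
	synchronize_rcu_bh();	/* All rcu_read_lock_bh() readers are done. */
	kfree(old_conf);
}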
  1821. static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
  1822. static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
  1823. static int synchronize_sched_expedited_cpu_stop(void *data)
  1824. {
  1825. /*
  1826. * There must be a full memory barrier on each affected CPU
  1827. * between the time that try_stop_cpus() is called and the
  1828. * time that it returns.
  1829. *
  1830. * In the current initial implementation of cpu_stop, the
  1831. * above condition is already met when the control reaches
  1832. * this point and the following smp_mb() is not strictly
  1833. * necessary. Do smp_mb() anyway for documentation and
  1834. * robustness against future implementation changes.
  1835. */
  1836. smp_mb(); /* See above comment block. */
  1837. return 0;
  1838. }
  1839. /**
  1840. * synchronize_sched_expedited - Brute-force RCU-sched grace period
  1841. *
  1842. * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
  1843. * approach to force the grace period to end quickly. This consumes
  1844. * significant time on all CPUs and is unfriendly to real-time workloads,
  1845. * so is thus not recommended for any sort of common-case code. In fact,
  1846. * if you are using synchronize_sched_expedited() in a loop, please
  1847. * restructure your code to batch your updates, and then use a single
  1848. * synchronize_sched() instead.
  1849. *
  1850. * Note that it is illegal to call this function while holding any lock
  1851. * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
  1852. * to call this function from a CPU-hotplug notifier. Failing to observe
1853. * these restrictions will result in deadlock.
  1854. *
  1855. * This implementation can be thought of as an application of ticket
  1856. * locking to RCU, with sync_sched_expedited_started and
  1857. * sync_sched_expedited_done taking on the roles of the halves
  1858. * of the ticket-lock word. Each task atomically increments
  1859. * sync_sched_expedited_started upon entry, snapshotting the old value,
  1860. * then attempts to stop all the CPUs. If this succeeds, then each
  1861. * CPU will have executed a context switch, resulting in an RCU-sched
  1862. * grace period. We are then done, so we use atomic_cmpxchg() to
  1863. * update sync_sched_expedited_done to match our snapshot -- but
  1864. * only if someone else has not already advanced past our snapshot.
  1865. *
  1866. * On the other hand, if try_stop_cpus() fails, we check the value
  1867. * of sync_sched_expedited_done. If it has advanced past our
  1868. * initial snapshot, then someone else must have forced a grace period
  1869. * some time after we took our snapshot. In this case, our work is
  1870. * done for us, and we can simply return. Otherwise, we try again,
  1871. * but keep our initial snapshot for purposes of checking for someone
  1872. * doing our work for us.
  1873. *
  1874. * If we fail too many times in a row, we fall back to synchronize_sched().
  1875. */
  1876. void synchronize_sched_expedited(void)
  1877. {
  1878. int firstsnap, s, snap, trycount = 0;
  1879. /* Note that atomic_inc_return() implies full memory barrier. */
  1880. firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
  1881. get_online_cpus();
  1882. WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
  1883. /*
  1884. * Each pass through the following loop attempts to force a
  1885. * context switch on each CPU.
  1886. */
  1887. while (try_stop_cpus(cpu_online_mask,
  1888. synchronize_sched_expedited_cpu_stop,
  1889. NULL) == -EAGAIN) {
  1890. put_online_cpus();
  1891. /* No joy, try again later. Or just synchronize_sched(). */
  1892. if (trycount++ < 10)
  1893. udelay(trycount * num_online_cpus());
  1894. else {
  1895. synchronize_sched();
  1896. return;
  1897. }
  1898. /* Check to see if someone else did our work for us. */
  1899. s = atomic_read(&sync_sched_expedited_done);
  1900. if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
  1901. smp_mb(); /* ensure test happens before caller kfree */
  1902. return;
  1903. }
  1904. /*
  1905. * Refetching sync_sched_expedited_started allows later
  1906. * callers to piggyback on our grace period. We subtract
  1907. * 1 to get the same token that the last incrementer got.
  1908. * We retry after they started, so our grace period works
  1909. * for them, and they started after our first try, so their
  1910. * grace period works for us.
  1911. */
  1912. get_online_cpus();
  1913. snap = atomic_read(&sync_sched_expedited_started);
  1914. smp_mb(); /* ensure read is before try_stop_cpus(). */
  1915. }
  1916. /*
  1917. * Everyone up to our most recent fetch is covered by our grace
  1918. * period. Update the counter, but only if our work is still
  1919. * relevant -- which it won't be if someone who started later
  1920. * than we did beat us to the punch.
  1921. */
  1922. do {
  1923. s = atomic_read(&sync_sched_expedited_done);
  1924. if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
  1925. smp_mb(); /* ensure test happens before caller kfree */
  1926. break;
  1927. }
  1928. } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
  1929. put_online_cpus();
  1930. }
  1931. EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
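/*
 * Illustrative sketch (not part of rcutree.c) of the batching advice in
 * the comment above: rather than one expedited grace period per removed
 * element, unlink a whole batch and wait once.  struct example_item and
 * example_free_batch() are hypothetical; every element on to_free is
 * assumed to have already been removed from its RCU-protected list.
 */
struct example_item {
	struct list_head list;
};

static void example_free_batch(struct list_head *to_free)
{
	struct example_item *ip, *tmp;

	synchronize_sched();	/* One grace period covers every element. */
	list_for_each_entry_safe(ip, tmp, to_free, list) {
		list_del(&ip->list);
		kfree(ip);
	}
}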
  1932. /*
  1933. * Check to see if there is any immediate RCU-related work to be done
  1934. * by the current CPU, for the specified type of RCU, returning 1 if so.
  1935. * The checks are in order of increasing expense: checks that can be
  1936. * carried out against CPU-local state are performed first. However,
  1937. * we must check for CPU stalls first, else we might not get a chance.
  1938. */
  1939. static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  1940. {
  1941. struct rcu_node *rnp = rdp->mynode;
  1942. rdp->n_rcu_pending++;
  1943. /* Check for CPU stalls, if enabled. */
  1944. check_cpu_stall(rsp, rdp);
  1945. /* Is the RCU core waiting for a quiescent state from this CPU? */
  1946. if (rcu_scheduler_fully_active &&
  1947. rdp->qs_pending && !rdp->passed_quiesce) {
  1948. /*
  1949. * If force_quiescent_state() coming soon and this CPU
  1950. * needs a quiescent state, and this is either RCU-sched
  1951. * or RCU-bh, force a local reschedule.
  1952. */
  1953. rdp->n_rp_qs_pending++;
  1954. if (!rdp->preemptible &&
  1955. ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
  1956. jiffies))
  1957. set_need_resched();
  1958. } else if (rdp->qs_pending && rdp->passed_quiesce) {
  1959. rdp->n_rp_report_qs++;
  1960. return 1;
  1961. }
  1962. /* Does this CPU have callbacks ready to invoke? */
  1963. if (cpu_has_callbacks_ready_to_invoke(rdp)) {
  1964. rdp->n_rp_cb_ready++;
  1965. return 1;
  1966. }
  1967. /* Has RCU gone idle with this CPU needing another grace period? */
  1968. if (cpu_needs_another_gp(rsp, rdp)) {
  1969. rdp->n_rp_cpu_needs_gp++;
  1970. return 1;
  1971. }
  1972. /* Has another RCU grace period completed? */
  1973. if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
  1974. rdp->n_rp_gp_completed++;
  1975. return 1;
  1976. }
  1977. /* Has a new RCU grace period started? */
  1978. if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
  1979. rdp->n_rp_gp_started++;
  1980. return 1;
  1981. }
  1982. /* Has an RCU GP gone long enough to send resched IPIs &c? */
  1983. if (rcu_gp_in_progress(rsp) &&
  1984. ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
  1985. rdp->n_rp_need_fqs++;
  1986. return 1;
  1987. }
  1988. /* nothing to do */
  1989. rdp->n_rp_need_nothing++;
  1990. return 0;
  1991. }
  1992. /*
  1993. * Check to see if there is any immediate RCU-related work to be done
  1994. * by the current CPU, returning 1 if so. This function is part of the
  1995. * RCU implementation; it is -not- an exported member of the RCU API.
  1996. */
  1997. static int rcu_pending(int cpu)
  1998. {
  1999. return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
  2000. __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
  2001. rcu_preempt_pending(cpu);
  2002. }
  2003. /*
  2004. * Check to see if any future RCU-related work will need to be done
  2005. * by the current CPU, even if none need be done immediately, returning
  2006. * 1 if so.
  2007. */
  2008. static int rcu_cpu_has_callbacks(int cpu)
  2009. {
  2010. /* RCU callbacks either ready or pending? */
  2011. return per_cpu(rcu_sched_data, cpu).nxtlist ||
  2012. per_cpu(rcu_bh_data, cpu).nxtlist ||
  2013. rcu_preempt_cpu_has_callbacks(cpu);
  2014. }
  2015. /*
  2016. * RCU callback function for _rcu_barrier(). If we are last, wake
  2017. * up the task executing _rcu_barrier().
  2018. */
  2019. static void rcu_barrier_callback(struct rcu_head *notused)
  2020. {
  2021. if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  2022. complete(&rcu_barrier_completion);
  2023. }
  2024. /*
  2025. * Called with preemption disabled, and from cross-cpu IRQ context.
  2026. */
  2027. static void rcu_barrier_func(void *type)
  2028. {
  2029. int cpu = smp_processor_id();
  2030. struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
  2031. void (*call_rcu_func)(struct rcu_head *head,
  2032. void (*func)(struct rcu_head *head));
  2033. atomic_inc(&rcu_barrier_cpu_count);
  2034. call_rcu_func = type;
  2035. call_rcu_func(head, rcu_barrier_callback);
  2036. }
  2037. /*
  2038. * Orchestrate the specified type of RCU barrier, waiting for all
  2039. * RCU callbacks of the specified type to complete.
  2040. */
  2041. static void _rcu_barrier(struct rcu_state *rsp,
  2042. void (*call_rcu_func)(struct rcu_head *head,
  2043. void (*func)(struct rcu_head *head)))
  2044. {
  2045. int cpu;
  2046. unsigned long flags;
  2047. struct rcu_data *rdp;
  2048. struct rcu_head rh;
  2049. init_rcu_head_on_stack(&rh);
  2050. /* Take mutex to serialize concurrent rcu_barrier() requests. */
  2051. mutex_lock(&rcu_barrier_mutex);
  2052. smp_mb(); /* Prevent any prior operations from leaking in. */
  2053. /*
  2054. * Initialize the count to one rather than to zero in order to
  2055. * avoid a too-soon return to zero in case of a short grace period
  2056. * (or preemption of this task). Also flag this task as doing
  2057. * an rcu_barrier(). This will prevent anyone else from adopting
2058. * orphaned callbacks, which could otherwise cause a failure if a
  2059. * CPU went offline and quickly came back online. To see this,
  2060. * consider the following sequence of events:
  2061. *
  2062. * 1. We cause CPU 0 to post an rcu_barrier_callback() callback.
  2063. * 2. CPU 1 goes offline, orphaning its callbacks.
  2064. * 3. CPU 0 adopts CPU 1's orphaned callbacks.
  2065. * 4. CPU 1 comes back online.
  2066. * 5. We cause CPU 1 to post an rcu_barrier_callback() callback.
  2067. * 6. Both rcu_barrier_callback() callbacks are invoked, awakening
  2068. * us -- but before CPU 1's orphaned callbacks are invoked!!!
  2069. */
  2070. init_completion(&rcu_barrier_completion);
  2071. atomic_set(&rcu_barrier_cpu_count, 1);
  2072. raw_spin_lock_irqsave(&rsp->onofflock, flags);
  2073. rsp->rcu_barrier_in_progress = current;
  2074. raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
  2075. /*
  2076. * Force every CPU with callbacks to register a new callback
  2077. * that will tell us when all the preceding callbacks have
  2078. * been invoked. If an offline CPU has callbacks, wait for
  2079. * it to either come back online or to finish orphaning those
  2080. * callbacks.
  2081. */
  2082. for_each_possible_cpu(cpu) {
  2083. preempt_disable();
  2084. rdp = per_cpu_ptr(rsp->rda, cpu);
  2085. if (cpu_is_offline(cpu)) {
  2086. preempt_enable();
  2087. while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
  2088. schedule_timeout_interruptible(1);
  2089. } else if (ACCESS_ONCE(rdp->qlen)) {
  2090. smp_call_function_single(cpu, rcu_barrier_func,
  2091. (void *)call_rcu_func, 1);
  2092. preempt_enable();
  2093. } else {
  2094. preempt_enable();
  2095. }
  2096. }
  2097. /*
  2098. * Now that all online CPUs have rcu_barrier_callback() callbacks
  2099. * posted, we can adopt all of the orphaned callbacks and place
  2100. * an rcu_barrier_callback() callback after them. When that is done,
  2101. * we are guaranteed to have an rcu_barrier_callback() callback
  2102. * following every callback that could possibly have been
  2103. * registered before _rcu_barrier() was called.
  2104. */
  2105. raw_spin_lock_irqsave(&rsp->onofflock, flags);
  2106. rcu_adopt_orphan_cbs(rsp);
  2107. rsp->rcu_barrier_in_progress = NULL;
  2108. raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
  2109. atomic_inc(&rcu_barrier_cpu_count);
  2110. smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
  2111. call_rcu_func(&rh, rcu_barrier_callback);
  2112. /*
  2113. * Now that we have an rcu_barrier_callback() callback on each
  2114. * CPU, and thus each counted, remove the initial count.
  2115. */
  2116. if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  2117. complete(&rcu_barrier_completion);
  2118. /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
  2119. wait_for_completion(&rcu_barrier_completion);
  2120. /* Other rcu_barrier() invocations can now safely proceed. */
  2121. mutex_unlock(&rcu_barrier_mutex);
  2122. destroy_rcu_head_on_stack(&rh);
  2123. }
  2124. /**
  2125. * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  2126. */
  2127. void rcu_barrier_bh(void)
  2128. {
  2129. _rcu_barrier(&rcu_bh_state, call_rcu_bh);
  2130. }
  2131. EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  2132. /**
  2133. * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
  2134. */
  2135. void rcu_barrier_sched(void)
  2136. {
  2137. _rcu_barrier(&rcu_sched_state, call_rcu_sched);
  2138. }
  2139. EXPORT_SYMBOL_GPL(rcu_barrier_sched);
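/*
 * Illustrative usage sketch (not part of rcutree.c): a module that posts
 * callbacks with call_rcu_sched() must wait for all of them to be invoked
 * before its callback functions can be unloaded, and rcu_barrier_sched()
 * provides exactly that wait.  example_module_exit() is a hypothetical
 * module-exit function.
 */
static void __exit example_module_exit(void)
{
	/* 1. Stop posting new call_rcu_sched() callbacks (module-specific). */
	/* 2. Wait for every already-posted callback to be invoked. */
	rcu_barrier_sched();
	/* 3. Only now may this module's callback code safely go away. */
}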
  2140. /*
  2141. * Do boot-time initialization of a CPU's per-CPU RCU data.
  2142. */
  2143. static void __init
  2144. rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  2145. {
  2146. unsigned long flags;
  2147. int i;
  2148. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  2149. struct rcu_node *rnp = rcu_get_root(rsp);
  2150. /* Set up local state, ensuring consistent view of global state. */
  2151. raw_spin_lock_irqsave(&rnp->lock, flags);
  2152. rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
  2153. rdp->nxtlist = NULL;
  2154. for (i = 0; i < RCU_NEXT_SIZE; i++)
  2155. rdp->nxttail[i] = &rdp->nxtlist;
  2156. rdp->qlen_lazy = 0;
  2157. rdp->qlen = 0;
  2158. rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
  2159. WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
  2160. WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
  2161. rdp->cpu = cpu;
  2162. rdp->rsp = rsp;
  2163. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  2164. }
  2165. /*
  2166. * Initialize a CPU's per-CPU RCU data. Note that only one online or
  2167. * offline event can be happening at a given time. Note also that we
  2168. * can accept some slop in the rsp->completed access due to the fact
  2169. * that this CPU cannot possibly have any RCU callbacks in flight yet.
  2170. */
  2171. static void __cpuinit
  2172. rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
  2173. {
  2174. unsigned long flags;
  2175. unsigned long mask;
  2176. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  2177. struct rcu_node *rnp = rcu_get_root(rsp);
  2178. /* Set up local state, ensuring consistent view of global state. */
  2179. raw_spin_lock_irqsave(&rnp->lock, flags);
  2180. rdp->beenonline = 1; /* We have now been online. */
  2181. rdp->preemptible = preemptible;
  2182. rdp->qlen_last_fqs_check = 0;
  2183. rdp->n_force_qs_snap = rsp->n_force_qs;
  2184. rdp->blimit = blimit;
  2185. rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
  2186. atomic_set(&rdp->dynticks->dynticks,
  2187. (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
  2188. rcu_prepare_for_idle_init(cpu);
  2189. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  2190. /*
  2191. * A new grace period might start here. If so, we won't be part
  2192. * of it, but that is OK, as we are currently in a quiescent state.
  2193. */
  2194. /* Exclude any attempts to start a new GP on large systems. */
  2195. raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
  2196. /* Add CPU to rcu_node bitmasks. */
  2197. rnp = rdp->mynode;
  2198. mask = rdp->grpmask;
  2199. do {
  2200. /* Exclude any attempts to start a new GP on small systems. */
  2201. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  2202. rnp->qsmaskinit |= mask;
  2203. mask = rnp->grpmask;
  2204. if (rnp == rdp->mynode) {
  2205. /*
  2206. * If there is a grace period in progress, we will
  2207. * set up to wait for it next time we run the
  2208. * RCU core code.
  2209. */
  2210. rdp->gpnum = rnp->completed;
  2211. rdp->completed = rnp->completed;
  2212. rdp->passed_quiesce = 0;
  2213. rdp->qs_pending = 0;
  2214. rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
  2215. trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
  2216. }
  2217. raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
  2218. rnp = rnp->parent;
  2219. } while (rnp != NULL && !(rnp->qsmaskinit & mask));
  2220. raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
  2221. }
  2222. static void __cpuinit rcu_prepare_cpu(int cpu)
  2223. {
  2224. rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
  2225. rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
  2226. rcu_preempt_init_percpu_data(cpu);
  2227. }
  2228. /*
  2229. * Handle CPU online/offline notification events.
  2230. */
  2231. static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
  2232. unsigned long action, void *hcpu)
  2233. {
  2234. long cpu = (long)hcpu;
  2235. struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
  2236. struct rcu_node *rnp = rdp->mynode;
  2237. trace_rcu_utilization("Start CPU hotplug");
  2238. switch (action) {
  2239. case CPU_UP_PREPARE:
  2240. case CPU_UP_PREPARE_FROZEN:
  2241. rcu_prepare_cpu(cpu);
  2242. rcu_prepare_kthreads(cpu);
  2243. break;
  2244. case CPU_ONLINE:
  2245. case CPU_DOWN_FAILED:
  2246. rcu_node_kthread_setaffinity(rnp, -1);
  2247. rcu_cpu_kthread_setrt(cpu, 1);
  2248. break;
  2249. case CPU_DOWN_PREPARE:
  2250. rcu_node_kthread_setaffinity(rnp, cpu);
  2251. rcu_cpu_kthread_setrt(cpu, 0);
  2252. break;
  2253. case CPU_DYING:
  2254. case CPU_DYING_FROZEN:
  2255. /*
  2256. * The whole machine is "stopped" except this CPU, so we can
  2257. * touch any data without introducing corruption. We send the
  2258. * dying CPU's callbacks to an arbitrarily chosen online CPU.
  2259. */
  2260. rcu_cleanup_dying_cpu(&rcu_bh_state);
  2261. rcu_cleanup_dying_cpu(&rcu_sched_state);
  2262. rcu_preempt_cleanup_dying_cpu();
  2263. rcu_cleanup_after_idle(cpu);
  2264. break;
  2265. case CPU_DEAD:
  2266. case CPU_DEAD_FROZEN:
  2267. case CPU_UP_CANCELED:
  2268. case CPU_UP_CANCELED_FROZEN:
  2269. rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
  2270. rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
  2271. rcu_preempt_cleanup_dead_cpu(cpu);
  2272. break;
  2273. default:
  2274. break;
  2275. }
  2276. trace_rcu_utilization("End CPU hotplug");
  2277. return NOTIFY_OK;
  2278. }
  2279. /*
  2280. * This function is invoked towards the end of the scheduler's initialization
  2281. * process. Before this is called, the idle task might contain
  2282. * RCU read-side critical sections (during which time, this idle
  2283. * task is booting the system). After this function is called, the
  2284. * idle tasks are prohibited from containing RCU read-side critical
  2285. * sections. This function also enables RCU lockdep checking.
  2286. */
  2287. void rcu_scheduler_starting(void)
  2288. {
  2289. WARN_ON(num_online_cpus() != 1);
  2290. WARN_ON(nr_context_switches() > 0);
  2291. rcu_scheduler_active = 1;
  2292. }
  2293. /*
  2294. * Compute the per-level fanout, either using the exact fanout specified
  2295. * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
  2296. */
  2297. #ifdef CONFIG_RCU_FANOUT_EXACT
  2298. static void __init rcu_init_levelspread(struct rcu_state *rsp)
  2299. {
  2300. int i;
  2301. for (i = NUM_RCU_LVLS - 1; i > 0; i--)
  2302. rsp->levelspread[i] = CONFIG_RCU_FANOUT;
  2303. rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF;
  2304. }
  2305. #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
  2306. static void __init rcu_init_levelspread(struct rcu_state *rsp)
  2307. {
  2308. int ccur;
  2309. int cprv;
  2310. int i;
  2311. cprv = NR_CPUS;
  2312. for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
  2313. ccur = rsp->levelcnt[i];
  2314. rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
  2315. cprv = ccur;
  2316. }
  2317. }
  2318. #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
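/*
 * Worked example (not part of rcutree.c), assuming a hypothetical
 * two-level tree with rsp->levelcnt = { 1, 4 } and NR_CPUS = 64.  The
 * non-exact variant above computes, from the leaves toward the root:
 *
 *	levelspread[1] = (64 + 4 - 1) / 4 = 16	(CPUs per leaf rcu_node)
 *	levelspread[0] = ( 4 + 1 - 1) / 1 =  4	(leaf rcu_nodes under the root)
 *
 * Each level's spread is thus the ceiling of the count below it divided
 * by the count at that level, which keeps the tree balanced.
 */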
  2319. /*
  2320. * Helper function for rcu_init() that initializes one rcu_state structure.
  2321. */
  2322. static void __init rcu_init_one(struct rcu_state *rsp,
  2323. struct rcu_data __percpu *rda)
  2324. {
  2325. static char *buf[] = { "rcu_node_level_0",
  2326. "rcu_node_level_1",
  2327. "rcu_node_level_2",
  2328. "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */
  2329. int cpustride = 1;
  2330. int i;
  2331. int j;
  2332. struct rcu_node *rnp;
  2333. BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
  2334. /* Initialize the level-tracking arrays. */
  2335. for (i = 1; i < NUM_RCU_LVLS; i++)
  2336. rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
  2337. rcu_init_levelspread(rsp);
  2338. /* Initialize the elements themselves, starting from the leaves. */
  2339. for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
  2340. cpustride *= rsp->levelspread[i];
  2341. rnp = rsp->level[i];
  2342. for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
  2343. raw_spin_lock_init(&rnp->lock);
  2344. lockdep_set_class_and_name(&rnp->lock,
  2345. &rcu_node_class[i], buf[i]);
  2346. rnp->gpnum = 0;
  2347. rnp->qsmask = 0;
  2348. rnp->qsmaskinit = 0;
  2349. rnp->grplo = j * cpustride;
  2350. rnp->grphi = (j + 1) * cpustride - 1;
  2351. if (rnp->grphi >= NR_CPUS)
  2352. rnp->grphi = NR_CPUS - 1;
  2353. if (i == 0) {
  2354. rnp->grpnum = 0;
  2355. rnp->grpmask = 0;
  2356. rnp->parent = NULL;
  2357. } else {
  2358. rnp->grpnum = j % rsp->levelspread[i - 1];
  2359. rnp->grpmask = 1UL << rnp->grpnum;
  2360. rnp->parent = rsp->level[i - 1] +
  2361. j / rsp->levelspread[i - 1];
  2362. }
  2363. rnp->level = i;
  2364. INIT_LIST_HEAD(&rnp->blkd_tasks);
  2365. }
  2366. }
  2367. rsp->rda = rda;
  2368. rnp = rsp->level[NUM_RCU_LVLS - 1];
  2369. for_each_possible_cpu(i) {
  2370. while (i > rnp->grphi)
  2371. rnp++;
  2372. per_cpu_ptr(rsp->rda, i)->mynode = rnp;
  2373. rcu_boot_init_percpu_data(i, rsp);
  2374. }
  2375. }
  2376. void __init rcu_init(void)
  2377. {
  2378. int cpu;
  2379. rcu_bootup_announce();
  2380. rcu_init_one(&rcu_sched_state, &rcu_sched_data);
  2381. rcu_init_one(&rcu_bh_state, &rcu_bh_data);
  2382. __rcu_init_preempt();
  2383. open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
  2384. /*
  2385. * We don't need protection against CPU-hotplug here because
  2386. * this is called early in boot, before either interrupts
  2387. * or the scheduler are operational.
  2388. */
  2389. cpu_notifier(rcu_cpu_notify, 0);
  2390. for_each_online_cpu(cpu)
  2391. rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
  2392. check_cpu_stall_init();
  2393. }
  2394. #include "rcutree_plugin.h"