rcutree.c

  1. /*
  2. * Read-Copy Update mechanism for mutual exclusion
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright IBM Corporation, 2008
  19. *
  20. * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  21. * Manfred Spraul <manfred@colorfullife.com>
  22. * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
  23. *
  24. * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  25. * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  26. *
  27. * For detailed explanation of Read-Copy Update mechanism see -
  28. * Documentation/RCU
  29. */
  30. #include <linux/types.h>
  31. #include <linux/kernel.h>
  32. #include <linux/init.h>
  33. #include <linux/spinlock.h>
  34. #include <linux/smp.h>
  35. #include <linux/rcupdate.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/sched.h>
  38. #include <linux/nmi.h>
  39. #include <linux/atomic.h>
  40. #include <linux/bitops.h>
  41. #include <linux/export.h>
  42. #include <linux/completion.h>
  43. #include <linux/moduleparam.h>
  44. #include <linux/percpu.h>
  45. #include <linux/notifier.h>
  46. #include <linux/cpu.h>
  47. #include <linux/mutex.h>
  48. #include <linux/time.h>
  49. #include <linux/kernel_stat.h>
  50. #include <linux/wait.h>
  51. #include <linux/kthread.h>
  52. #include <linux/prefetch.h>
  53. #include "rcutree.h"
  54. #include <trace/events/rcu.h>
  55. #include "rcu.h"
  56. /* Data structures. */
  57. static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
  58. #define RCU_STATE_INITIALIZER(structname) { \
  59. .level = { &structname##_state.node[0] }, \
  60. .levelcnt = { \
  61. NUM_RCU_LVL_0, /* root of hierarchy. */ \
  62. NUM_RCU_LVL_1, \
  63. NUM_RCU_LVL_2, \
  64. NUM_RCU_LVL_3, \
  65. NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
  66. }, \
  67. .fqs_state = RCU_GP_IDLE, \
  68. .gpnum = -300, \
  69. .completed = -300, \
  70. .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
  71. .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
  72. .n_force_qs = 0, \
  73. .n_force_qs_ngp = 0, \
  74. .name = #structname, \
  75. }
  76. struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
  77. DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
  78. struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
  79. DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
  80. static struct rcu_state *rcu_state;
  81. /*
  82. * The rcu_scheduler_active variable transitions from zero to one just
  83. * before the first task is spawned. So when this variable is zero, RCU
  84. * can assume that there is but one task, allowing RCU to (for example)
  85. * optimize synchronize_sched() to a simple barrier(). When this variable
  86. * is one, RCU must actually do all the hard work required to detect real
  87. * grace periods. This variable is also used to suppress boot-time false
  88. * positives from lockdep-RCU error checking.
  89. */
  90. int rcu_scheduler_active __read_mostly;
  91. EXPORT_SYMBOL_GPL(rcu_scheduler_active);
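/*
 * Illustrative sketch only (not code from this file; the example_* name is
 * made up): how a grace-period wait could exploit rcu_scheduler_active as
 * described above. While the variable is still zero there is only one task,
 * so no RCU read-side critical section can be in flight and a barrier()
 * suffices; afterwards a real grace period must be awaited.
 */
static inline void example_wait_for_gp_sketch(void)
{
	if (!rcu_scheduler_active) {
		barrier();		/* Single task: trivially a grace period. */
		return;
	}
	synchronize_sched();		/* Otherwise wait for a real grace period. */
}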
  92. /*
  93. * The rcu_scheduler_fully_active variable transitions from zero to one
  94. * during the early_initcall() processing, which is after the scheduler
  95. * is capable of creating new tasks. So RCU processing (for example,
  96. * creating tasks for RCU priority boosting) must be delayed until after
  97. * rcu_scheduler_fully_active transitions from zero to one. We also
  98. * currently delay invocation of any RCU callbacks until after this point.
  99. *
  100. * It might later prove better for people registering RCU callbacks during
  101. * early boot to take responsibility for these callbacks, but one step at
  102. * a time.
  103. */
  104. static int rcu_scheduler_fully_active __read_mostly;
  105. #ifdef CONFIG_RCU_BOOST
  106. /*
  107. * Control variables for per-CPU and per-rcu_node kthreads. These
  108. * handle all flavors of RCU.
  109. */
  110. static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
  111. DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
  112. DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
  113. DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
  114. DEFINE_PER_CPU(char, rcu_cpu_has_work);
  115. #endif /* #ifdef CONFIG_RCU_BOOST */
  116. static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
  117. static void invoke_rcu_core(void);
  118. static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
  119. /*
  120. * Track the rcutorture test sequence number and the update version
  121. * number within a given test. The rcutorture_testseq is incremented
  122. * on every rcutorture module load and unload, so has an odd value
  123. * when a test is running. The rcutorture_vernum is set to zero
  124. * when rcutorture starts and is incremented on each rcutorture update.
  125. * These variables enable correlating rcutorture output with the
  126. * RCU tracing information.
  127. */
  128. unsigned long rcutorture_testseq;
  129. unsigned long rcutorture_vernum;
  130. /*
  131. * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
  132. * permit this function to be invoked without holding the root rcu_node
  133. * structure's ->lock, but of course results can be subject to change.
  134. */
  135. static int rcu_gp_in_progress(struct rcu_state *rsp)
  136. {
  137. return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
  138. }
  139. /*
  140. * Note a quiescent state. Because we do not need to know
  141. * how many quiescent states passed, just if there was at least
  142. * one since the start of the grace period, this just sets a flag.
  143. * The caller must have disabled preemption.
  144. */
  145. void rcu_sched_qs(int cpu)
  146. {
  147. struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
  148. rdp->passed_quiesce_gpnum = rdp->gpnum;
  149. barrier();
  150. if (rdp->passed_quiesce == 0)
  151. trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
  152. rdp->passed_quiesce = 1;
  153. }
  154. void rcu_bh_qs(int cpu)
  155. {
  156. struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
  157. rdp->passed_quiesce_gpnum = rdp->gpnum;
  158. barrier();
  159. if (rdp->passed_quiesce == 0)
  160. trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
  161. rdp->passed_quiesce = 1;
  162. }
  163. /*
  164. * Note a context switch. This is a quiescent state for RCU-sched,
  165. * and requires special handling for preemptible RCU.
  166. * The caller must have disabled preemption.
  167. */
  168. void rcu_note_context_switch(int cpu)
  169. {
  170. trace_rcu_utilization("Start context switch");
  171. rcu_sched_qs(cpu);
  172. rcu_preempt_note_context_switch(cpu);
  173. trace_rcu_utilization("End context switch");
  174. }
  175. EXPORT_SYMBOL_GPL(rcu_note_context_switch);
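/*
 * Illustrative sketch of a call site (an assumption for illustration, not
 * code from this file; the example_* name is made up): the scheduler reports
 * each context switch with preemption disabled, which counts as a quiescent
 * state for RCU-sched as noted above.
 */
static void example_report_context_switch(void)
{
	preempt_disable();
	rcu_note_context_switch(smp_processor_id());
	preempt_enable();
}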
  176. DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
  177. .dynticks_nesting = DYNTICK_TASK_NESTING,
  178. .dynticks = ATOMIC_INIT(1),
  179. };
  180. static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
  181. static int qhimark = 10000; /* If this many pending, ignore blimit. */
  182. static int qlowmark = 100; /* Once only this many pending, use blimit. */
  183. module_param(blimit, int, 0);
  184. module_param(qhimark, int, 0);
  185. module_param(qlowmark, int, 0);
  186. int rcu_cpu_stall_suppress __read_mostly;
  187. module_param(rcu_cpu_stall_suppress, int, 0644);
  188. static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
  189. static int rcu_pending(int cpu);
  190. /*
  191. * Return the number of RCU-sched batches processed thus far for debug & stats.
  192. */
  193. long rcu_batches_completed_sched(void)
  194. {
  195. return rcu_sched_state.completed;
  196. }
  197. EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
  198. /*
  199. * Return the number of RCU BH batches processed thus far for debug & stats.
  200. */
  201. long rcu_batches_completed_bh(void)
  202. {
  203. return rcu_bh_state.completed;
  204. }
  205. EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
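/*
 * Illustrative sketch only (the example_* helper is made up): debug or
 * statistics code might sample the counters above around an operation to
 * estimate how many RCU-sched grace periods elapsed across it.
 */
static long example_gps_elapsed_across(void (*op)(void))
{
	long snap = rcu_batches_completed_sched();

	op();
	return rcu_batches_completed_sched() - snap;
}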
  206. /*
  207. * Force a quiescent state for RCU BH.
  208. */
  209. void rcu_bh_force_quiescent_state(void)
  210. {
  211. force_quiescent_state(&rcu_bh_state, 0);
  212. }
  213. EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  214. /*
  215. * Record the number of times rcutorture tests have been initiated and
  216. * terminated. This information allows the debugfs tracing stats to be
  217. * correlated to the rcutorture messages, even when the rcutorture module
  218. * is being repeatedly loaded and unloaded. In other words, we cannot
  219. * store this state in rcutorture itself.
  220. */
  221. void rcutorture_record_test_transition(void)
  222. {
  223. rcutorture_testseq++;
  224. rcutorture_vernum = 0;
  225. }
  226. EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
  227. /*
  228. * Record the number of writer passes through the current rcutorture test.
  229. * This is also used to correlate debugfs tracing stats with the rcutorture
  230. * messages.
  231. */
  232. void rcutorture_record_progress(unsigned long vernum)
  233. {
  234. rcutorture_vernum++;
  235. }
  236. EXPORT_SYMBOL_GPL(rcutorture_record_progress);
  237. /*
  238. * Force a quiescent state for RCU-sched.
  239. */
  240. void rcu_sched_force_quiescent_state(void)
  241. {
  242. force_quiescent_state(&rcu_sched_state, 0);
  243. }
  244. EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  245. /*
  246. * Does the CPU have callbacks ready to be invoked?
  247. */
  248. static int
  249. cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
  250. {
  251. return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
  252. }
  253. /*
  254. * Does the current CPU require an as-yet-unscheduled grace period?
  255. */
  256. static int
  257. cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  258. {
  259. return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
  260. }
  261. /*
  262. * Return the root node of the specified rcu_state structure.
  263. */
  264. static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
  265. {
  266. return &rsp->node[0];
  267. }
  268. #ifdef CONFIG_SMP
  269. /*
  270. * If the specified CPU is offline, tell the caller that it is in
  271. * a quiescent state. Otherwise, whack it with a reschedule IPI.
  272. * Grace periods can end up waiting on an offline CPU when that
  273. * CPU is in the process of coming online -- it will be added to the
  274. * rcu_node bitmasks before it actually makes it online. The same thing
  275. * can happen while a CPU is in the process of coming online. Because this
  276. * race is quite rare, we check for it after detecting that the grace
  277. * period has been delayed rather than checking each and every CPU
  278. * each and every time we start a new grace period.
  279. */
  280. static int rcu_implicit_offline_qs(struct rcu_data *rdp)
  281. {
  282. /*
  283. * If the CPU is offline, it is in a quiescent state. We can
  284. * trust its state not to change because interrupts are disabled.
  285. */
  286. if (cpu_is_offline(rdp->cpu)) {
  287. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
  288. rdp->offline_fqs++;
  289. return 1;
  290. }
  291. /*
  292. * The CPU is online, so send it a reschedule IPI. This forces
  293. * it through the scheduler, and (inefficiently) also handles cases
  294. * where idle loops fail to inform RCU about the CPU being idle.
  295. */
  296. if (rdp->cpu != smp_processor_id())
  297. smp_send_reschedule(rdp->cpu);
  298. else
  299. set_need_resched();
  300. rdp->resched_ipi++;
  301. return 0;
  302. }
  303. #endif /* #ifdef CONFIG_SMP */
  304. /*
  305. * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
  306. *
  307. * If the new value of the ->dynticks_nesting counter is now zero,
  308. * we really have entered idle, and must do the appropriate accounting.
  309. * The caller must have disabled interrupts.
  310. */
  311. static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
  312. {
  313. if (rdtp->dynticks_nesting) {
  314. trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
  315. return;
  316. }
  317. trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
  318. if (!is_idle_task(current)) {
  319. struct task_struct *idle = idle_task(smp_processor_id());
  320. trace_rcu_dyntick("Error on entry: not idle task",
  321. oldval, rdtp->dynticks_nesting);
  322. ftrace_dump(DUMP_ALL);
  323. WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
  324. current->pid, current->comm,
  325. idle->pid, idle->comm); /* must be idle task! */
  326. }
  327. rcu_prepare_for_idle(smp_processor_id());
  328. /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
  329. smp_mb__before_atomic_inc(); /* See above. */
  330. atomic_inc(&rdtp->dynticks);
  331. smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
  332. WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
  333. }
  334. /**
  335. * rcu_idle_enter - inform RCU that current CPU is entering idle
  336. *
  337. * Enter idle mode, in other words, -leave- the mode in which RCU
  338. * read-side critical sections can occur. (Though RCU read-side
  339. * critical sections can occur in irq handlers in idle, a possibility
  340. * handled by irq_enter() and irq_exit().)
  341. *
  342. * We crowbar the ->dynticks_nesting field to zero to allow for
  343. * the possibility of usermode upcalls having messed up our count
  344. * of interrupt nesting level during the prior busy period.
  345. */
  346. void rcu_idle_enter(void)
  347. {
  348. unsigned long flags;
  349. long long oldval;
  350. struct rcu_dynticks *rdtp;
  351. local_irq_save(flags);
  352. rdtp = &__get_cpu_var(rcu_dynticks);
  353. oldval = rdtp->dynticks_nesting;
  354. rdtp->dynticks_nesting = 0;
  355. rcu_idle_enter_common(rdtp, oldval);
  356. local_irq_restore(flags);
  357. }
  358. /**
  359. * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
  360. *
  361. * Exit from an interrupt handler, which might possibly result in entering
  362. * idle mode, in other words, leaving the mode in which read-side critical
  363. * sections can occur.
  364. *
  365. * This code assumes that the idle loop never does anything that might
  366. * result in unbalanced calls to irq_enter() and irq_exit(). If your
  367. * architecture violates this assumption, RCU will give you what you
  368. * deserve, good and hard. But very infrequently and irreproducibly.
  369. *
  370. * Use things like work queues to work around this limitation.
  371. *
  372. * You have been warned.
  373. */
  374. void rcu_irq_exit(void)
  375. {
  376. unsigned long flags;
  377. long long oldval;
  378. struct rcu_dynticks *rdtp;
  379. local_irq_save(flags);
  380. rdtp = &__get_cpu_var(rcu_dynticks);
  381. oldval = rdtp->dynticks_nesting;
  382. rdtp->dynticks_nesting--;
  383. WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
  384. rcu_idle_enter_common(rdtp, oldval);
  385. local_irq_restore(flags);
  386. }
  387. /*
  388. * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
  389. *
  390. * If the old value of the ->dynticks_nesting counter was zero,
  391. * we really have exited idle, and must do the appropriate accounting.
  392. * The caller must have disabled interrupts.
  393. */
  394. static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
  395. {
  396. if (oldval) {
  397. trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
  398. return;
  399. }
  400. smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
  401. atomic_inc(&rdtp->dynticks);
  402. /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
  403. smp_mb__after_atomic_inc(); /* See above. */
  404. WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
  405. trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
  406. if (!is_idle_task(current)) {
  407. struct task_struct *idle = idle_task(smp_processor_id());
  408. trace_rcu_dyntick("Error on exit: not idle task",
  409. oldval, rdtp->dynticks_nesting);
  410. ftrace_dump(DUMP_ALL);
  411. WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
  412. current->pid, current->comm,
  413. idle->pid, idle->comm); /* must be idle task! */
  414. }
  415. }
  416. /**
  417. * rcu_idle_exit - inform RCU that current CPU is leaving idle
  418. *
  419. * Exit idle mode, in other words, -enter- the mode in which RCU
  420. * read-side critical sections can occur.
  421. *
  422. * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
  423. * allow for the possibility of usermode upcalls messing up our count
  424. * of interrupt nesting level during the busy period that is just
  425. * now starting.
  426. */
  427. void rcu_idle_exit(void)
  428. {
  429. unsigned long flags;
  430. struct rcu_dynticks *rdtp;
  431. long long oldval;
  432. local_irq_save(flags);
  433. rdtp = &__get_cpu_var(rcu_dynticks);
  434. oldval = rdtp->dynticks_nesting;
  435. WARN_ON_ONCE(oldval != 0);
  436. rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
  437. rcu_idle_exit_common(rdtp, oldval);
  438. local_irq_restore(flags);
  439. }
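/*
 * Illustrative sketch (not part of this file; the example_* name is made
 * up): the shape of an idle-loop iteration that keeps RCU informed,
 * bracketing the architecture's low-power wait with rcu_idle_enter() and
 * rcu_idle_exit() as described above.
 */
static void example_idle_loop_iteration(void)
{
	rcu_idle_enter();	/* RCU may now ignore this CPU. */
	cpu_relax();		/* Stand-in for the arch-specific low-power wait. */
	rcu_idle_exit();	/* CPU is non-idle again; RCU must watch it. */
}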
  440. /**
  441. * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
  442. *
  443. * Enter an interrupt handler, which might possibly result in exiting
  444. * idle mode, in other words, entering the mode in which read-side critical
  445. * sections can occur.
  446. *
  447. * Note that the Linux kernel is fully capable of entering an interrupt
  448. * handler that it never exits, for example when doing upcalls to
  449. * user mode! This code assumes that the idle loop never does upcalls to
  450. * user mode. If your architecture does do upcalls from the idle loop (or
  451. * does anything else that results in unbalanced calls to the irq_enter()
  452. * and irq_exit() functions), RCU will give you what you deserve, good
  453. * and hard. But very infrequently and irreproducibly.
  454. *
  455. * Use things like work queues to work around this limitation.
  456. *
  457. * You have been warned.
  458. */
  459. void rcu_irq_enter(void)
  460. {
  461. unsigned long flags;
  462. struct rcu_dynticks *rdtp;
  463. long long oldval;
  464. local_irq_save(flags);
  465. rdtp = &__get_cpu_var(rcu_dynticks);
  466. oldval = rdtp->dynticks_nesting;
  467. rdtp->dynticks_nesting++;
  468. WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
  469. rcu_idle_exit_common(rdtp, oldval);
  470. local_irq_restore(flags);
  471. }
  472. /**
  473. * rcu_nmi_enter - inform RCU of entry to NMI context
  474. *
  475. * If the CPU was idle with dynamic ticks active, and there is no
  476. * irq handler running, this updates rdtp->dynticks to let the
  477. * RCU grace-period handling know that the CPU is active.
  478. */
  479. void rcu_nmi_enter(void)
  480. {
  481. struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  482. if (rdtp->dynticks_nmi_nesting == 0 &&
  483. (atomic_read(&rdtp->dynticks) & 0x1))
  484. return;
  485. rdtp->dynticks_nmi_nesting++;
  486. smp_mb__before_atomic_inc(); /* Force delay from prior write. */
  487. atomic_inc(&rdtp->dynticks);
  488. /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
  489. smp_mb__after_atomic_inc(); /* See above. */
  490. WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
  491. }
  492. /**
  493. * rcu_nmi_exit - inform RCU of exit from NMI context
  494. *
  495. * If the CPU was idle with dynamic ticks active, and there is no
  496. * irq handler running, this updates rdtp->dynticks to let the
  497. * RCU grace-period handling know that the CPU is no longer active.
  498. */
  499. void rcu_nmi_exit(void)
  500. {
  501. struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  502. if (rdtp->dynticks_nmi_nesting == 0 ||
  503. --rdtp->dynticks_nmi_nesting != 0)
  504. return;
  505. /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
  506. smp_mb__before_atomic_inc(); /* See above. */
  507. atomic_inc(&rdtp->dynticks);
  508. smp_mb__after_atomic_inc(); /* Force delay to next write. */
  509. WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
  510. }
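/*
 * Illustrative sketch (an assumption, not code from this file; the
 * example_* name is made up): an NMI handler that uses RCU brackets its
 * work with rcu_nmi_enter()/rcu_nmi_exit(), so that ->dynticks is odd
 * (non-idle) while the handler runs even if the NMI arrived during
 * dyntick-idle.
 */
static void example_nmi_handler_body(void)
{
	rcu_nmi_enter();
	/* ... NMI work that may use rcu_dereference() goes here ... */
	rcu_nmi_exit();
}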
  511. #ifdef CONFIG_PROVE_RCU
  512. /**
  513. * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
  514. *
  515. * If the current CPU is in its idle loop and is neither in an interrupt
  516. * nor an NMI handler, return true.
  517. */
  518. int rcu_is_cpu_idle(void)
  519. {
  520. int ret;
  521. preempt_disable();
  522. ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
  523. preempt_enable();
  524. return ret;
  525. }
  526. EXPORT_SYMBOL(rcu_is_cpu_idle);
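/*
 * Illustrative sketch only (the example_* name is made up): how debugging
 * code might use rcu_is_cpu_idle() to complain about RCU usage from the
 * idle loop. Assumes the caller is pinned to a CPU or otherwise running in
 * a stable context.
 */
static inline void example_warn_if_rcu_idle(void)
{
	WARN_ON_ONCE(rcu_is_cpu_idle());
}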
  527. #endif /* #ifdef CONFIG_PROVE_RCU */
  528. /**
  529. * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
  530. *
  531. * If the current CPU is idle or running at a first-level (not nested)
  532. * interrupt from idle, return true. The caller must have at least
  533. * disabled preemption.
  534. */
  535. int rcu_is_cpu_rrupt_from_idle(void)
  536. {
  537. return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
  538. }
  539. #ifdef CONFIG_SMP
  540. /*
  541. * Snapshot the specified CPU's dynticks counter so that we can later
  542. * credit it with an implicit quiescent state. Return 0 so that this CPU
  543. * will be rechecked later via rcu_implicit_dynticks_qs().
  544. */
  545. static int dyntick_save_progress_counter(struct rcu_data *rdp)
  546. {
  547. rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
  548. return 0;
  549. }
  550. /*
  551. * Return true if the specified CPU has passed through a quiescent
  552. * state by virtue of being in or having passed through a dynticks
  553. * idle state since the last call to dyntick_save_progress_counter()
  554. * for this same CPU.
  555. */
  556. static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  557. {
  558. unsigned int curr;
  559. unsigned int snap;
  560. curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
  561. snap = (unsigned int)rdp->dynticks_snap;
  562. /*
  563. * If the CPU passed through or entered a dynticks idle phase with
  564. * no active irq/NMI handlers, then we can safely pretend that the CPU
  565. * already acknowledged the request to pass through a quiescent
  566. * state. Either way, that CPU cannot possibly be in an RCU
  567. * read-side critical section that started before the beginning
  568. * of the current RCU grace period.
  569. */
  570. if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
  571. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
  572. rdp->dynticks_fqs++;
  573. return 1;
  574. }
  575. /* Go check for the CPU being offline. */
  576. return rcu_implicit_offline_qs(rdp);
  577. }
  578. #endif /* #ifdef CONFIG_SMP */
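/*
 * Illustrative helper (a sketch, not part of the kernel API; the example_*
 * name is made up): the decision made by rcu_implicit_dynticks_qs() above,
 * expressed on its own. A (curr, snap) pair of ->dynticks samples implies a
 * quiescent state when curr is even (the CPU is idle right now) or has
 * advanced by at least two since the snapshot (the CPU passed through idle
 * in the meantime).
 */
static inline int example_dynticks_pair_implies_qs(unsigned int curr,
						   unsigned int snap)
{
	return (curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2);
}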
  579. int rcu_cpu_stall_suppress __read_mostly;
  580. static void record_gp_stall_check_time(struct rcu_state *rsp)
  581. {
  582. rsp->gp_start = jiffies;
  583. rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
  584. }
  585. static void print_other_cpu_stall(struct rcu_state *rsp)
  586. {
  587. int cpu;
  588. long delta;
  589. unsigned long flags;
  590. int ndetected;
  591. struct rcu_node *rnp = rcu_get_root(rsp);
  592. /* Only let one CPU complain about others per time interval. */
  593. raw_spin_lock_irqsave(&rnp->lock, flags);
  594. delta = jiffies - rsp->jiffies_stall;
  595. if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
  596. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  597. return;
  598. }
  599. rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
  600. /*
  601. * Now rat on any tasks that got kicked up to the root rcu_node
  602. * due to CPU offlining.
  603. */
  604. ndetected = rcu_print_task_stall(rnp);
  605. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  606. /*
  607. * OK, time to rat on our buddy...
  608. * See Documentation/RCU/stallwarn.txt for info on how to debug
  609. * RCU CPU stall warnings.
  610. */
  611. printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
  612. rsp->name);
  613. rcu_for_each_leaf_node(rsp, rnp) {
  614. raw_spin_lock_irqsave(&rnp->lock, flags);
  615. ndetected += rcu_print_task_stall(rnp);
  616. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  617. if (rnp->qsmask == 0)
  618. continue;
  619. for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
  620. if (rnp->qsmask & (1UL << cpu)) {
  621. printk(" %d", rnp->grplo + cpu);
  622. ndetected++;
  623. }
  624. }
  625. printk("} (detected by %d, t=%ld jiffies)\n",
  626. smp_processor_id(), (long)(jiffies - rsp->gp_start));
  627. if (ndetected == 0)
  628. printk(KERN_ERR "INFO: Stall ended before state dump start\n");
  629. else if (!trigger_all_cpu_backtrace())
  630. dump_stack();
  631. /* If so configured, complain about tasks blocking the grace period. */
  632. rcu_print_detail_task_stall(rsp);
  633. force_quiescent_state(rsp, 0); /* Kick them all. */
  634. }
  635. static void print_cpu_stall(struct rcu_state *rsp)
  636. {
  637. unsigned long flags;
  638. struct rcu_node *rnp = rcu_get_root(rsp);
  639. /*
  640. * OK, time to rat on ourselves...
  641. * See Documentation/RCU/stallwarn.txt for info on how to debug
  642. * RCU CPU stall warnings.
  643. */
  644. printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
  645. rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
  646. if (!trigger_all_cpu_backtrace())
  647. dump_stack();
  648. raw_spin_lock_irqsave(&rnp->lock, flags);
  649. if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
  650. rsp->jiffies_stall =
  651. jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
  652. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  653. set_need_resched(); /* kick ourselves to get things going. */
  654. }
  655. static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  656. {
  657. unsigned long j;
  658. unsigned long js;
  659. struct rcu_node *rnp;
  660. if (rcu_cpu_stall_suppress)
  661. return;
  662. j = ACCESS_ONCE(jiffies);
  663. js = ACCESS_ONCE(rsp->jiffies_stall);
  664. rnp = rdp->mynode;
  665. if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
  666. /* We haven't checked in, so go dump stack. */
  667. print_cpu_stall(rsp);
  668. } else if (rcu_gp_in_progress(rsp) &&
  669. ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
  670. /* They had a few time units to dump stack, so complain. */
  671. print_other_cpu_stall(rsp);
  672. }
  673. }
  674. static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
  675. {
  676. rcu_cpu_stall_suppress = 1;
  677. return NOTIFY_DONE;
  678. }
  679. /**
  680. * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
  681. *
  682. * Set the stall-warning timeout way off into the future, thus preventing
  683. * any RCU CPU stall-warning messages from appearing in the current set of
  684. * RCU grace periods.
  685. *
  686. * The caller must disable hard irqs.
  687. */
  688. void rcu_cpu_stall_reset(void)
  689. {
  690. rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
  691. rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
  692. rcu_preempt_stall_reset();
  693. }
  694. static struct notifier_block rcu_panic_block = {
  695. .notifier_call = rcu_panic,
  696. };
  697. static void __init check_cpu_stall_init(void)
  698. {
  699. atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
  700. }
  701. /*
  702. * Update CPU-local rcu_data state to record the newly noticed grace period.
  703. * This is used both when we started the grace period and when we notice
  704. * that someone else started the grace period. The caller must hold the
  705. * ->lock of the leaf rcu_node structure corresponding to the current CPU,
  706. * and must have irqs disabled.
  707. */
  708. static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  709. {
  710. if (rdp->gpnum != rnp->gpnum) {
  711. /*
  712. * If the current grace period is waiting for this CPU,
  713. * set up to detect a quiescent state, otherwise don't
  714. * go looking for one.
  715. */
  716. rdp->gpnum = rnp->gpnum;
  717. trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
  718. if (rnp->qsmask & rdp->grpmask) {
  719. rdp->qs_pending = 1;
  720. rdp->passed_quiesce = 0;
  721. } else
  722. rdp->qs_pending = 0;
  723. }
  724. }
  725. static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
  726. {
  727. unsigned long flags;
  728. struct rcu_node *rnp;
  729. local_irq_save(flags);
  730. rnp = rdp->mynode;
  731. if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
  732. !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
  733. local_irq_restore(flags);
  734. return;
  735. }
  736. __note_new_gpnum(rsp, rnp, rdp);
  737. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  738. }
  739. /*
  740. * Did someone else start a new RCU grace period since we last
  741. * checked? Update local state appropriately if so. Must be called
  742. * on the CPU corresponding to rdp.
  743. */
  744. static int
  745. check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
  746. {
  747. unsigned long flags;
  748. int ret = 0;
  749. local_irq_save(flags);
  750. if (rdp->gpnum != rsp->gpnum) {
  751. note_new_gpnum(rsp, rdp);
  752. ret = 1;
  753. }
  754. local_irq_restore(flags);
  755. return ret;
  756. }
  757. /*
  758. * Advance this CPU's callbacks, but only if the current grace period
  759. * has ended. This may be called only from the CPU to whom the rdp
  760. * belongs. In addition, the corresponding leaf rcu_node structure's
  761. * ->lock must be held by the caller, with irqs disabled.
  762. */
  763. static void
  764. __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  765. {
  766. /* Did another grace period end? */
  767. if (rdp->completed != rnp->completed) {
  768. /* Advance callbacks. No harm if list empty. */
  769. rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
  770. rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
  771. rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  772. /* Remember that we saw this grace-period completion. */
  773. rdp->completed = rnp->completed;
  774. trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
  775. /*
  776. * If we were in an extended quiescent state, we may have
  777. * missed some grace periods that other CPUs handled on
  778. * our behalf. Catch up with this state to avoid noting
  779. * spurious new grace periods. If another grace period
  780. * has started, then rnp->gpnum will have advanced, so
  781. * we will detect this later on.
  782. */
  783. if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
  784. rdp->gpnum = rdp->completed;
  785. /*
  786. * If RCU does not need a quiescent state from this CPU,
  787. * then make sure that this CPU doesn't go looking for one.
  788. */
  789. if ((rnp->qsmask & rdp->grpmask) == 0)
  790. rdp->qs_pending = 0;
  791. }
  792. }
  793. /*
  794. * Advance this CPU's callbacks, but only if the current grace period
  795. * has ended. This may be called only from the CPU to whom the rdp
  796. * belongs.
  797. */
  798. static void
  799. rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  800. {
  801. unsigned long flags;
  802. struct rcu_node *rnp;
  803. local_irq_save(flags);
  804. rnp = rdp->mynode;
  805. if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
  806. !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
  807. local_irq_restore(flags);
  808. return;
  809. }
  810. __rcu_process_gp_end(rsp, rnp, rdp);
  811. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  812. }
  813. /*
  814. * Do per-CPU grace-period initialization for running CPU. The caller
  815. * must hold the lock of the leaf rcu_node structure corresponding to
  816. * this CPU.
  817. */
  818. static void
  819. rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  820. {
  821. /* Prior grace period ended, so advance callbacks for current CPU. */
  822. __rcu_process_gp_end(rsp, rnp, rdp);
  823. /*
  824. * Because this CPU just now started the new grace period, we know
  825. * that all of its callbacks will be covered by this upcoming grace
  826. * period, even the ones that were registered arbitrarily recently.
  827. * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
  828. *
  829. * Other CPUs cannot be sure exactly when the grace period started.
  830. * Therefore, their recently registered callbacks must pass through
  831. * an additional RCU_NEXT_READY stage, so that they will be handled
  832. * by the next RCU grace period.
  833. */
  834. rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  835. rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  836. /* Set state so that this CPU will detect the next quiescent state. */
  837. __note_new_gpnum(rsp, rnp, rdp);
  838. }
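/*
 * Illustrative sketch of the segmented callback list that the comments
 * above manipulate (not new kernel code; the example_* name is made up):
 * an empty list has ->nxtlist == NULL and every ->nxttail[] slot pointing
 * at &->nxtlist, and advancing a segment simply copies a later tail
 * pointer into an earlier slot, as rcu_start_gp_per_cpu() does above.
 */
static inline void example_reset_segmented_cblist(struct rcu_data *rdp)
{
	int i;

	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rdp->qlen = 0;
}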
  839. /*
  840. * Start a new RCU grace period if warranted, re-initializing the hierarchy
  841. * in preparation for detecting the next grace period. The caller must hold
  842. * the root node's ->lock, which is released before return. Hard irqs must
  843. * be disabled.
  844. */
  845. static void
  846. rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
  847. __releases(rcu_get_root(rsp)->lock)
  848. {
  849. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  850. struct rcu_node *rnp = rcu_get_root(rsp);
  851. if (!rcu_scheduler_fully_active ||
  852. !cpu_needs_another_gp(rsp, rdp)) {
  853. /*
  854. * Either the scheduler hasn't yet spawned the first
  855. * non-idle task or this CPU does not need another
  856. * grace period. Either way, don't start a new grace
  857. * period.
  858. */
  859. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  860. return;
  861. }
  862. if (rsp->fqs_active) {
  863. /*
  864. * This CPU needs a grace period, but force_quiescent_state()
  865. * is running. Tell it to start one on this CPU's behalf.
  866. */
  867. rsp->fqs_need_gp = 1;
  868. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  869. return;
  870. }
  871. /* Advance to a new grace period and initialize state. */
  872. rsp->gpnum++;
  873. trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
  874. WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
  875. rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
  876. rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
  877. record_gp_stall_check_time(rsp);
  878. /* Special-case the common single-level case. */
  879. if (NUM_RCU_NODES == 1) {
  880. rcu_preempt_check_blocked_tasks(rnp);
  881. rnp->qsmask = rnp->qsmaskinit;
  882. rnp->gpnum = rsp->gpnum;
  883. rnp->completed = rsp->completed;
  884. rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
  885. rcu_start_gp_per_cpu(rsp, rnp, rdp);
  886. rcu_preempt_boost_start_gp(rnp);
  887. trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  888. rnp->level, rnp->grplo,
  889. rnp->grphi, rnp->qsmask);
  890. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  891. return;
  892. }
  893. raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
  894. /* Exclude any concurrent CPU-hotplug operations. */
  895. raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
  896. /*
  897. * Set the quiescent-state-needed bits in all the rcu_node
  898. * structures for all currently online CPUs in breadth-first
  899. * order, starting from the root rcu_node structure. This
  900. * operation relies on the layout of the hierarchy within the
  901. * rsp->node[] array. Note that other CPUs will access only
  902. * the leaves of the hierarchy, which still indicate that no
  903. * grace period is in progress, at least until the corresponding
  904. * leaf node has been initialized. In addition, we have excluded
  905. * CPU-hotplug operations.
  906. *
  907. * Note that the grace period cannot complete until we finish
  908. * the initialization process, as there will be at least one
  909. * qsmask bit set in the root node until that time, namely the
  910. * one corresponding to this CPU, due to the fact that we have
  911. * irqs disabled.
  912. */
  913. rcu_for_each_node_breadth_first(rsp, rnp) {
  914. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  915. rcu_preempt_check_blocked_tasks(rnp);
  916. rnp->qsmask = rnp->qsmaskinit;
  917. rnp->gpnum = rsp->gpnum;
  918. rnp->completed = rsp->completed;
  919. if (rnp == rdp->mynode)
  920. rcu_start_gp_per_cpu(rsp, rnp, rdp);
  921. rcu_preempt_boost_start_gp(rnp);
  922. trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  923. rnp->level, rnp->grplo,
  924. rnp->grphi, rnp->qsmask);
  925. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  926. }
  927. rnp = rcu_get_root(rsp);
  928. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  929. rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
  930. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  931. raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
  932. }
  933. /*
  934. * Report a full set of quiescent states to the specified rcu_state
  935. * data structure. This involves cleaning up after the prior grace
  936. * period and letting rcu_start_gp() start up the next grace period
  937. * if one is needed. Note that the caller must hold rnp->lock, as
  938. * required by rcu_start_gp(), which will release it.
  939. */
  940. static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  941. __releases(rcu_get_root(rsp)->lock)
  942. {
  943. unsigned long gp_duration;
  944. struct rcu_node *rnp = rcu_get_root(rsp);
  945. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  946. WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
  947. /*
  948. * Ensure that all grace-period and pre-grace-period activity
  949. * is seen before the assignment to rsp->completed.
  950. */
  951. smp_mb(); /* See above block comment. */
  952. gp_duration = jiffies - rsp->gp_start;
  953. if (gp_duration > rsp->gp_max)
  954. rsp->gp_max = gp_duration;
  955. /*
  956. * We know the grace period is complete, but to everyone else
  957. * it appears to still be ongoing. But it is also the case
  958. * that to everyone else it looks like there is nothing that
  959. * they can do to advance the grace period. It is therefore
  960. * safe for us to drop the lock in order to mark the grace
  961. * period as completed in all of the rcu_node structures.
  962. *
  963. * But if this CPU needs another grace period, it will take
  964. * care of this while initializing the next grace period.
  965. * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
  966. * because the callbacks have not yet been advanced: Those
  967. * callbacks are waiting on the grace period that just now
  968. * completed.
  969. */
  970. if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
  971. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  972. /*
  973. * Propagate new ->completed value to rcu_node structures
  974. * so that other CPUs don't have to wait until the start
  975. * of the next grace period to process their callbacks.
  976. */
  977. rcu_for_each_node_breadth_first(rsp, rnp) {
  978. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  979. rnp->completed = rsp->gpnum;
  980. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  981. }
  982. rnp = rcu_get_root(rsp);
  983. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  984. }
  985. rsp->completed = rsp->gpnum; /* Declare the grace period complete. */
  986. trace_rcu_grace_period(rsp->name, rsp->completed, "end");
  987. rsp->fqs_state = RCU_GP_IDLE;
  988. rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
  989. }
  990. /*
  991. * Similar to rcu_report_qs_rdp(), for which it is a helper function.
  992. * Allows quiescent states for a group of CPUs to be reported at one go
  993. * to the specified rcu_node structure, though all the CPUs in the group
  994. * must be represented by the same rcu_node structure (which need not be
  995. * a leaf rcu_node structure, though it often will be). That structure's
  996. * lock must be held upon entry, and it is released before return.
  997. */
  998. static void
  999. rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  1000. struct rcu_node *rnp, unsigned long flags)
  1001. __releases(rnp->lock)
  1002. {
  1003. struct rcu_node *rnp_c;
  1004. /* Walk up the rcu_node hierarchy. */
  1005. for (;;) {
  1006. if (!(rnp->qsmask & mask)) {
  1007. /* Our bit has already been cleared, so done. */
  1008. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1009. return;
  1010. }
  1011. rnp->qsmask &= ~mask;
  1012. trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
  1013. mask, rnp->qsmask, rnp->level,
  1014. rnp->grplo, rnp->grphi,
  1015. !!rnp->gp_tasks);
  1016. if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
  1017. /* Other bits still set at this level, so done. */
  1018. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1019. return;
  1020. }
  1021. mask = rnp->grpmask;
  1022. if (rnp->parent == NULL) {
  1023. /* No more levels. Exit loop holding root lock. */
  1024. break;
  1025. }
  1026. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1027. rnp_c = rnp;
  1028. rnp = rnp->parent;
  1029. raw_spin_lock_irqsave(&rnp->lock, flags);
  1030. WARN_ON_ONCE(rnp_c->qsmask);
  1031. }
  1032. /*
  1033. * Get here if we are the last CPU to pass through a quiescent
  1034. * state for this grace period. Invoke rcu_report_qs_rsp()
  1035. * to clean up and start the next grace period if one is needed.
  1036. */
  1037. rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
  1038. }
  1039. /*
  1040. * Record a quiescent state for the specified CPU to that CPU's rcu_data
  1041. * structure. This must be either called from the specified CPU, or
  1042. * called when the specified CPU is known to be offline (and when it is
  1043. * also known that no other CPU is concurrently trying to help the offline
  1044. * CPU). The lastgp argument is used to make sure we are still in the
  1045. * grace period of interest. We don't want to end the current grace period
  1046. * based on quiescent states detected in an earlier grace period!
  1047. */
  1048. static void
  1049. rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
  1050. {
  1051. unsigned long flags;
  1052. unsigned long mask;
  1053. struct rcu_node *rnp;
  1054. rnp = rdp->mynode;
  1055. raw_spin_lock_irqsave(&rnp->lock, flags);
  1056. if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {
  1057. /*
  1058. * The grace period in which this quiescent state was
  1059. * recorded has ended, so don't report it upwards.
  1060. * We will instead need a new quiescent state that lies
  1061. * within the current grace period.
  1062. */
  1063. rdp->passed_quiesce = 0; /* need qs for new gp. */
  1064. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1065. return;
  1066. }
  1067. mask = rdp->grpmask;
  1068. if ((rnp->qsmask & mask) == 0) {
  1069. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1070. } else {
  1071. rdp->qs_pending = 0;
  1072. /*
  1073. * This GP can't end until this CPU checks in, so all of our
  1074. * callbacks can be processed during the next GP.
  1075. */
  1076. rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  1077. rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
  1078. }
  1079. }
  1080. /*
  1081. * Check to see if there is a new grace period of which this CPU
  1082. * is not yet aware, and if so, set up local rcu_data state for it.
  1083. * Otherwise, see if this CPU has just passed through its first
  1084. * quiescent state for this grace period, and record that fact if so.
  1085. */
  1086. static void
  1087. rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  1088. {
  1089. /* If there is now a new grace period, record and return. */
  1090. if (check_for_new_grace_period(rsp, rdp))
  1091. return;
  1092. /*
  1093. * Does this CPU still need to do its part for current grace period?
  1094. * If no, return and let the other CPUs do their part as well.
  1095. */
  1096. if (!rdp->qs_pending)
  1097. return;
  1098. /*
  1099. * Was there a quiescent state since the beginning of the grace
  1100. * period? If no, then exit and wait for the next call.
  1101. */
  1102. if (!rdp->passed_quiesce)
  1103. return;
  1104. /*
  1105. * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  1106. * judge of that).
  1107. */
  1108. rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
  1109. }
  1110. #ifdef CONFIG_HOTPLUG_CPU
  1111. /*
  1112. * Move a dying CPU's RCU callbacks to an online CPU's callback list.
  1113. * Synchronization is not required because this function executes
  1114. * in stop_machine() context.
  1115. */
  1116. static void rcu_send_cbs_to_online(struct rcu_state *rsp)
  1117. {
  1118. int i;
  1119. /* current DYING CPU is cleared in the cpu_online_mask */
  1120. int receive_cpu = cpumask_any(cpu_online_mask);
  1121. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  1122. struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
  1123. if (rdp->nxtlist == NULL)
  1124. return; /* irqs disabled, so comparison is stable. */
  1125. *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
  1126. receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  1127. receive_rdp->qlen += rdp->qlen;
  1128. receive_rdp->n_cbs_adopted += rdp->qlen;
  1129. rdp->n_cbs_orphaned += rdp->qlen;
  1130. rdp->nxtlist = NULL;
  1131. for (i = 0; i < RCU_NEXT_SIZE; i++)
  1132. rdp->nxttail[i] = &rdp->nxtlist;
  1133. rdp->qlen = 0;
  1134. }
  1135. /*
  1136. * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  1137. * and move all callbacks from the outgoing CPU to the current one.
  1138. * There can only be one CPU hotplug operation at a time, so no other
  1139. * CPU can be attempting to update rcu_cpu_kthread_task.
  1140. */
  1141. static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  1142. {
  1143. unsigned long flags;
  1144. unsigned long mask;
  1145. int need_report = 0;
  1146. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  1147. struct rcu_node *rnp;
  1148. rcu_stop_cpu_kthread(cpu);
  1149. /* Exclude any attempts to start a new grace period. */
  1150. raw_spin_lock_irqsave(&rsp->onofflock, flags);
  1151. /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
  1152. rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
  1153. mask = rdp->grpmask; /* rnp->grplo is constant. */
  1154. do {
  1155. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  1156. rnp->qsmaskinit &= ~mask;
  1157. if (rnp->qsmaskinit != 0) {
  1158. if (rnp != rdp->mynode)
  1159. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1160. else
  1161. trace_rcu_grace_period(rsp->name,
  1162. rnp->gpnum + 1 -
  1163. !!(rnp->qsmask & mask),
  1164. "cpuofl");
  1165. break;
  1166. }
  1167. if (rnp == rdp->mynode) {
  1168. trace_rcu_grace_period(rsp->name,
  1169. rnp->gpnum + 1 -
  1170. !!(rnp->qsmask & mask),
  1171. "cpuofl");
  1172. need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
  1173. } else
  1174. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1175. mask = rnp->grpmask;
  1176. rnp = rnp->parent;
  1177. } while (rnp != NULL);
  1178. /*
  1179. * We still hold the leaf rcu_node structure lock here, and
1180. * irqs are still disabled. The reason for this subterfuge is
1181. * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
  1182. * held leads to deadlock.
  1183. */
  1184. raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
  1185. rnp = rdp->mynode;
  1186. if (need_report & RCU_OFL_TASKS_NORM_GP)
  1187. rcu_report_unblock_qs_rnp(rnp, flags);
  1188. else
  1189. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1190. if (need_report & RCU_OFL_TASKS_EXP_GP)
  1191. rcu_report_exp_rnp(rsp, rnp, true);
  1192. rcu_node_kthread_setaffinity(rnp, -1);
  1193. }
  1194. /*
  1195. * Remove the specified CPU from the RCU hierarchy and move any pending
  1196. * callbacks that it might have to the current CPU. This code assumes
  1197. * that at least one CPU in the system will remain running at all times.
  1198. * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
  1199. */
  1200. static void rcu_offline_cpu(int cpu)
  1201. {
  1202. __rcu_offline_cpu(cpu, &rcu_sched_state);
  1203. __rcu_offline_cpu(cpu, &rcu_bh_state);
  1204. rcu_preempt_offline_cpu(cpu);
  1205. }
  1206. #else /* #ifdef CONFIG_HOTPLUG_CPU */
  1207. static void rcu_send_cbs_to_online(struct rcu_state *rsp)
  1208. {
  1209. }
  1210. static void rcu_offline_cpu(int cpu)
  1211. {
  1212. }
  1213. #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
  1214. /*
  1215. * Invoke any RCU callbacks that have made it to the end of their grace
1216. * period. Throttle as specified by rdp->blimit.
  1217. */
  1218. static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  1219. {
  1220. unsigned long flags;
  1221. struct rcu_head *next, *list, **tail;
  1222. int bl, count;
1223. /* If no callbacks are ready, just return. */
  1224. if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
  1225. trace_rcu_batch_start(rsp->name, 0, 0);
  1226. trace_rcu_batch_end(rsp->name, 0);
  1227. return;
  1228. }
  1229. /*
1230. * Extract the list of ready callbacks, disabling interrupts to prevent
  1231. * races with call_rcu() from interrupt handlers.
  1232. */
  1233. local_irq_save(flags);
  1234. bl = rdp->blimit;
  1235. trace_rcu_batch_start(rsp->name, rdp->qlen, bl);
  1236. list = rdp->nxtlist;
  1237. rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
  1238. *rdp->nxttail[RCU_DONE_TAIL] = NULL;
  1239. tail = rdp->nxttail[RCU_DONE_TAIL];
  1240. for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
  1241. if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
  1242. rdp->nxttail[count] = &rdp->nxtlist;
  1243. local_irq_restore(flags);
  1244. /* Invoke callbacks. */
  1245. count = 0;
  1246. while (list) {
  1247. next = list->next;
  1248. prefetch(next);
  1249. debug_rcu_head_unqueue(list);
  1250. __rcu_reclaim(rsp->name, list);
  1251. list = next;
  1252. if (++count >= bl)
  1253. break;
  1254. }
  1255. local_irq_save(flags);
  1256. trace_rcu_batch_end(rsp->name, count);
  1257. /* Update count, and requeue any remaining callbacks. */
  1258. rdp->qlen -= count;
  1259. rdp->n_cbs_invoked += count;
  1260. if (list != NULL) {
  1261. *tail = rdp->nxtlist;
  1262. rdp->nxtlist = list;
  1263. for (count = 0; count < RCU_NEXT_SIZE; count++)
  1264. if (&rdp->nxtlist == rdp->nxttail[count])
  1265. rdp->nxttail[count] = tail;
  1266. else
  1267. break;
  1268. }
  1269. /* Reinstate batch limit if we have worked down the excess. */
  1270. if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
  1271. rdp->blimit = blimit;
  1272. /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  1273. if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
  1274. rdp->qlen_last_fqs_check = 0;
  1275. rdp->n_force_qs_snap = rsp->n_force_qs;
  1276. } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  1277. rdp->qlen_last_fqs_check = rdp->qlen;
  1278. local_irq_restore(flags);
  1279. /* Re-invoke RCU core processing if there are callbacks remaining. */
  1280. if (cpu_has_callbacks_ready_to_invoke(rdp))
  1281. invoke_rcu_core();
  1282. }
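/*
 * Worked example (illustrative, assuming the default blimit of 10): if
 * 25 callbacks are ready, the loop above invokes 10 of them, splices
 * the remaining 15 back onto the head of ->nxtlist, and the final
 * invoke_rcu_core() re-raises RCU_SOFTIRQ so a later pass handles the
 * next batch.  Only when __call_rcu() sees a large backlog is ->blimit
 * bumped to LONG_MAX, and the test above restores the normal limit
 * once ->qlen falls back to qlowmark or below.
 */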
  1283. /*
  1284. * Check to see if this CPU is in a non-context-switch quiescent state
  1285. * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
  1286. * Also schedule RCU core processing.
  1287. *
  1288. * This function must be called from hardirq context. It is normally
  1289. * invoked from the scheduling-clock interrupt. If rcu_pending returns
  1290. * false, there is no point in invoking rcu_check_callbacks().
  1291. */
  1292. void rcu_check_callbacks(int cpu, int user)
  1293. {
  1294. trace_rcu_utilization("Start scheduler-tick");
  1295. if (user || rcu_is_cpu_rrupt_from_idle()) {
  1296. /*
  1297. * Get here if this CPU took its interrupt from user
  1298. * mode or from the idle loop, and if this is not a
  1299. * nested interrupt. In this case, the CPU is in
  1300. * a quiescent state, so note it.
  1301. *
  1302. * No memory barrier is required here because both
  1303. * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  1304. * variables that other CPUs neither access nor modify,
  1305. * at least not while the corresponding CPU is online.
  1306. */
  1307. rcu_sched_qs(cpu);
  1308. rcu_bh_qs(cpu);
  1309. } else if (!in_softirq()) {
  1310. /*
  1311. * Get here if this CPU did not take its interrupt from
  1312. * softirq, in other words, if it is not interrupting
1313. * a rcu_bh read-side critical section. This is therefore
1314. * an rcu_bh quiescent state, so note it.
  1315. */
  1316. rcu_bh_qs(cpu);
  1317. }
  1318. rcu_preempt_check_callbacks(cpu);
  1319. if (rcu_pending(cpu))
  1320. invoke_rcu_core();
  1321. trace_rcu_utilization("End scheduler-tick");
  1322. }
  1323. #ifdef CONFIG_SMP
  1324. /*
  1325. * Scan the leaf rcu_node structures, processing dyntick state for any that
  1326. * have not yet encountered a quiescent state, using the function specified.
  1327. * Also initiate boosting for any threads blocked on the root rcu_node.
  1328. *
  1329. * The caller must have suppressed start of new grace periods.
  1330. */
  1331. static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
  1332. {
  1333. unsigned long bit;
  1334. int cpu;
  1335. unsigned long flags;
  1336. unsigned long mask;
  1337. struct rcu_node *rnp;
  1338. rcu_for_each_leaf_node(rsp, rnp) {
  1339. mask = 0;
  1340. raw_spin_lock_irqsave(&rnp->lock, flags);
  1341. if (!rcu_gp_in_progress(rsp)) {
  1342. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1343. return;
  1344. }
  1345. if (rnp->qsmask == 0) {
  1346. rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
  1347. continue;
  1348. }
  1349. cpu = rnp->grplo;
  1350. bit = 1;
  1351. for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
  1352. if ((rnp->qsmask & bit) != 0 &&
  1353. f(per_cpu_ptr(rsp->rda, cpu)))
  1354. mask |= bit;
  1355. }
  1356. if (mask != 0) {
  1357. /* rcu_report_qs_rnp() releases rnp->lock. */
  1358. rcu_report_qs_rnp(mask, rsp, rnp, flags);
  1359. continue;
  1360. }
  1361. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1362. }
  1363. rnp = rcu_get_root(rsp);
  1364. if (rnp->qsmask == 0) {
  1365. raw_spin_lock_irqsave(&rnp->lock, flags);
  1366. rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
  1367. }
  1368. }
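/*
 * Illustrative example (a sketch, not from the source): for a leaf
 * rcu_node covering CPUs 0-15 with ->qsmask == 0x5, the loop above
 * calls f() only for CPUs 0 and 2.  If f() reports a quiescent state
 * for CPU 2 alone, mask ends up as 0x4 and rcu_report_qs_rnp() clears
 * that bit, propagating the quiescent state up the tree if the leaf's
 * mask is now empty.
 */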
  1369. /*
  1370. * Force quiescent states on reluctant CPUs, and also detect which
  1371. * CPUs are in dyntick-idle mode.
  1372. */
  1373. static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
  1374. {
  1375. unsigned long flags;
  1376. struct rcu_node *rnp = rcu_get_root(rsp);
  1377. trace_rcu_utilization("Start fqs");
  1378. if (!rcu_gp_in_progress(rsp)) {
  1379. trace_rcu_utilization("End fqs");
  1380. return; /* No grace period in progress, nothing to force. */
  1381. }
  1382. if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
  1383. rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
  1384. trace_rcu_utilization("End fqs");
  1385. return; /* Someone else is already on the job. */
  1386. }
  1387. if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
  1388. goto unlock_fqs_ret; /* no emergency and done recently. */
  1389. rsp->n_force_qs++;
  1390. raw_spin_lock(&rnp->lock); /* irqs already disabled */
  1391. rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1392. if (!rcu_gp_in_progress(rsp)) {
  1393. rsp->n_force_qs_ngp++;
  1394. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1395. goto unlock_fqs_ret; /* no GP in progress, time updated. */
  1396. }
  1397. rsp->fqs_active = 1;
  1398. switch (rsp->fqs_state) {
  1399. case RCU_GP_IDLE:
  1400. case RCU_GP_INIT:
  1401. break; /* grace period idle or initializing, ignore. */
  1402. case RCU_SAVE_DYNTICK:
  1403. if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
  1404. break; /* So gcc recognizes the dead code. */
  1405. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1406. /* Record dyntick-idle state. */
  1407. force_qs_rnp(rsp, dyntick_save_progress_counter);
  1408. raw_spin_lock(&rnp->lock); /* irqs already disabled */
  1409. if (rcu_gp_in_progress(rsp))
  1410. rsp->fqs_state = RCU_FORCE_QS;
  1411. break;
  1412. case RCU_FORCE_QS:
1413. /* Check dyntick-idle state, send IPI to laggards. */
  1414. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1415. force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
  1416. /* Leave state in case more forcing is required. */
  1417. raw_spin_lock(&rnp->lock); /* irqs already disabled */
  1418. break;
  1419. }
  1420. rsp->fqs_active = 0;
  1421. if (rsp->fqs_need_gp) {
  1422. raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
  1423. rsp->fqs_need_gp = 0;
  1424. rcu_start_gp(rsp, flags); /* releases rnp->lock */
  1425. trace_rcu_utilization("End fqs");
  1426. return;
  1427. }
  1428. raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
  1429. unlock_fqs_ret:
  1430. raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
  1431. trace_rcu_utilization("End fqs");
  1432. }
  1433. #else /* #ifdef CONFIG_SMP */
  1434. static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
  1435. {
  1436. set_need_resched();
  1437. }
  1438. #endif /* #else #ifdef CONFIG_SMP */
  1439. /*
  1440. * This does the RCU core processing work for the specified rcu_state
  1441. * and rcu_data structures. This may be called only from the CPU to
1442. * which the rdp belongs.
  1443. */
  1444. static void
  1445. __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  1446. {
  1447. unsigned long flags;
  1448. WARN_ON_ONCE(rdp->beenonline == 0);
  1449. /*
  1450. * If an RCU GP has gone long enough, go check for dyntick
  1451. * idle CPUs and, if needed, send resched IPIs.
  1452. */
  1453. if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
  1454. force_quiescent_state(rsp, 1);
  1455. /*
  1456. * Advance callbacks in response to end of earlier grace
  1457. * period that some other CPU ended.
  1458. */
  1459. rcu_process_gp_end(rsp, rdp);
  1460. /* Update RCU state based on any recent quiescent states. */
  1461. rcu_check_quiescent_state(rsp, rdp);
  1462. /* Does this CPU require a not-yet-started grace period? */
  1463. if (cpu_needs_another_gp(rsp, rdp)) {
  1464. raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
  1465. rcu_start_gp(rsp, flags); /* releases above lock */
  1466. }
  1467. /* If there are callbacks ready, invoke them. */
  1468. if (cpu_has_callbacks_ready_to_invoke(rdp))
  1469. invoke_rcu_callbacks(rsp, rdp);
  1470. }
  1471. /*
  1472. * Do RCU core processing for the current CPU.
  1473. */
  1474. static void rcu_process_callbacks(struct softirq_action *unused)
  1475. {
  1476. trace_rcu_utilization("Start RCU core");
  1477. __rcu_process_callbacks(&rcu_sched_state,
  1478. &__get_cpu_var(rcu_sched_data));
  1479. __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
  1480. rcu_preempt_process_callbacks();
  1481. trace_rcu_utilization("End RCU core");
  1482. }
  1483. /*
  1484. * Schedule RCU callback invocation. If the specified type of RCU
  1485. * does not support RCU priority boosting, just do a direct call,
  1486. * otherwise wake up the per-CPU kernel kthread. Note that because we
  1487. * are running on the current CPU with interrupts disabled, the
  1488. * rcu_cpu_kthread_task cannot disappear out from under us.
  1489. */
  1490. static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  1491. {
  1492. if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
  1493. return;
  1494. if (likely(!rsp->boost)) {
  1495. rcu_do_batch(rsp, rdp);
  1496. return;
  1497. }
  1498. invoke_rcu_callbacks_kthread();
  1499. }
  1500. static void invoke_rcu_core(void)
  1501. {
  1502. raise_softirq(RCU_SOFTIRQ);
  1503. }
  1504. static void
  1505. __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
  1506. struct rcu_state *rsp)
  1507. {
  1508. unsigned long flags;
  1509. struct rcu_data *rdp;
  1510. debug_rcu_head_queue(head);
  1511. head->func = func;
  1512. head->next = NULL;
  1513. smp_mb(); /* Ensure RCU update seen before callback registry. */
  1514. /*
  1515. * Opportunistically note grace-period endings and beginnings.
  1516. * Note that we might see a beginning right after we see an
  1517. * end, but never vice versa, since this CPU has to pass through
  1518. * a quiescent state betweentimes.
  1519. */
  1520. local_irq_save(flags);
  1521. rdp = this_cpu_ptr(rsp->rda);
  1522. /* Add the callback to our list. */
  1523. *rdp->nxttail[RCU_NEXT_TAIL] = head;
  1524. rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
  1525. rdp->qlen++;
  1526. if (__is_kfree_rcu_offset((unsigned long)func))
  1527. trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
  1528. rdp->qlen);
  1529. else
  1530. trace_rcu_callback(rsp->name, head, rdp->qlen);
  1531. /* If interrupts were disabled, don't dive into RCU core. */
  1532. if (irqs_disabled_flags(flags)) {
  1533. local_irq_restore(flags);
  1534. return;
  1535. }
  1536. /*
  1537. * Force the grace period if too many callbacks or too long waiting.
  1538. * Enforce hysteresis, and don't invoke force_quiescent_state()
  1539. * if some other CPU has recently done so. Also, don't bother
  1540. * invoking force_quiescent_state() if the newly enqueued callback
  1541. * is the only one waiting for a grace period to complete.
  1542. */
  1543. if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
  1544. /* Are we ignoring a completed grace period? */
  1545. rcu_process_gp_end(rsp, rdp);
  1546. check_for_new_grace_period(rsp, rdp);
  1547. /* Start a new grace period if one not already started. */
  1548. if (!rcu_gp_in_progress(rsp)) {
  1549. unsigned long nestflag;
  1550. struct rcu_node *rnp_root = rcu_get_root(rsp);
  1551. raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
1552. rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */
  1553. } else {
  1554. /* Give the grace period a kick. */
  1555. rdp->blimit = LONG_MAX;
  1556. if (rsp->n_force_qs == rdp->n_force_qs_snap &&
  1557. *rdp->nxttail[RCU_DONE_TAIL] != head)
  1558. force_quiescent_state(rsp, 0);
  1559. rdp->n_force_qs_snap = rsp->n_force_qs;
  1560. rdp->qlen_last_fqs_check = rdp->qlen;
  1561. }
  1562. } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
  1563. force_quiescent_state(rsp, 1);
  1564. local_irq_restore(flags);
  1565. }
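/*
 * Illustrative numbers (a sketch, assuming the default qhimark of
 * 10000): once this CPU's ->qlen exceeds ->qlen_last_fqs_check by more
 * than 10000 callbacks, the slow path above runs: it catches up with
 * any completed grace period, then either starts a new one or, if one
 * is already in progress, drops the batch limit (->blimit = LONG_MAX)
 * and considers calling force_quiescent_state().  The snapshots taken
 * in the in-progress case restore the hysteresis, so subsequent
 * call_rcu() invocations go back to the fast path.
 */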
  1566. /*
  1567. * Queue an RCU-sched callback for invocation after a grace period.
  1568. */
  1569. void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
  1570. {
  1571. __call_rcu(head, func, &rcu_sched_state);
  1572. }
  1573. EXPORT_SYMBOL_GPL(call_rcu_sched);
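/*
 * Typical caller-side usage (an illustrative sketch; foo, foo_reclaim,
 * and gp are hypothetical names, not part of this file):
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, new_foo);
 *	call_rcu_sched(&old->rcu, foo_reclaim);
 *
 * foo_reclaim() runs only after all pre-existing preempt-disabled
 * regions (and hence all rcu_read_lock_sched() readers) have finished.
 */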
  1574. /*
1575. * Queue an RCU-bh callback for invocation after a quicker grace period.
  1576. */
  1577. void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
  1578. {
  1579. __call_rcu(head, func, &rcu_bh_state);
  1580. }
  1581. EXPORT_SYMBOL_GPL(call_rcu_bh);
  1582. /**
  1583. * synchronize_sched - wait until an rcu-sched grace period has elapsed.
  1584. *
  1585. * Control will return to the caller some time after a full rcu-sched
  1586. * grace period has elapsed, in other words after all currently executing
  1587. * rcu-sched read-side critical sections have completed. These read-side
  1588. * critical sections are delimited by rcu_read_lock_sched() and
  1589. * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
  1590. * local_irq_disable(), and so on may be used in place of
  1591. * rcu_read_lock_sched().
  1592. *
  1593. * This means that all preempt_disable code sequences, including NMI and
  1594. * hardware-interrupt handlers, in progress on entry will have completed
  1595. * before this primitive returns. However, this does not guarantee that
  1596. * softirq handlers will have completed, since in some kernels, these
  1597. * handlers can run in process context, and can block.
  1598. *
  1599. * This primitive provides the guarantees made by the (now removed)
  1600. * synchronize_kernel() API. In contrast, synchronize_rcu() only
  1601. * guarantees that rcu_read_lock() sections will have completed.
  1602. * In "classic RCU", these two guarantees happen to be one and
  1603. * the same, but can differ in realtime RCU implementations.
  1604. */
  1605. void synchronize_sched(void)
  1606. {
  1607. if (rcu_blocking_is_gp())
  1608. return;
  1609. wait_rcu_gp(call_rcu_sched);
  1610. }
  1611. EXPORT_SYMBOL_GPL(synchronize_sched);
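/*
 * Illustrative update-side pattern (a sketch; gp and gp_lock are
 * hypothetical):
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	synchronize_sched();	-- wait for pre-existing readers
 *	kfree(old);		-- no reader can still hold a reference
 *
 * The kfree() cannot execute until every rcu_read_lock_sched() or
 * preempt_disable() region that might have fetched the old pointer
 * has completed.
 */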
  1612. /**
  1613. * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  1614. *
  1615. * Control will return to the caller some time after a full rcu_bh grace
  1616. * period has elapsed, in other words after all currently executing rcu_bh
  1617. * read-side critical sections have completed. RCU read-side critical
  1618. * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  1619. * and may be nested.
  1620. */
  1621. void synchronize_rcu_bh(void)
  1622. {
  1623. if (rcu_blocking_is_gp())
  1624. return;
  1625. wait_rcu_gp(call_rcu_bh);
  1626. }
  1627. EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
  1628. /*
  1629. * Check to see if there is any immediate RCU-related work to be done
  1630. * by the current CPU, for the specified type of RCU, returning 1 if so.
  1631. * The checks are in order of increasing expense: checks that can be
  1632. * carried out against CPU-local state are performed first. However,
  1633. * we must check for CPU stalls first, else we might not get a chance.
  1634. */
  1635. static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  1636. {
  1637. struct rcu_node *rnp = rdp->mynode;
  1638. rdp->n_rcu_pending++;
  1639. /* Check for CPU stalls, if enabled. */
  1640. check_cpu_stall(rsp, rdp);
  1641. /* Is the RCU core waiting for a quiescent state from this CPU? */
  1642. if (rcu_scheduler_fully_active &&
  1643. rdp->qs_pending && !rdp->passed_quiesce) {
  1644. /*
  1645. * If force_quiescent_state() coming soon and this CPU
  1646. * needs a quiescent state, and this is either RCU-sched
  1647. * or RCU-bh, force a local reschedule.
  1648. */
  1649. rdp->n_rp_qs_pending++;
  1650. if (!rdp->preemptible &&
  1651. ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
  1652. jiffies))
  1653. set_need_resched();
  1654. } else if (rdp->qs_pending && rdp->passed_quiesce) {
  1655. rdp->n_rp_report_qs++;
  1656. return 1;
  1657. }
  1658. /* Does this CPU have callbacks ready to invoke? */
  1659. if (cpu_has_callbacks_ready_to_invoke(rdp)) {
  1660. rdp->n_rp_cb_ready++;
  1661. return 1;
  1662. }
  1663. /* Has RCU gone idle with this CPU needing another grace period? */
  1664. if (cpu_needs_another_gp(rsp, rdp)) {
  1665. rdp->n_rp_cpu_needs_gp++;
  1666. return 1;
  1667. }
  1668. /* Has another RCU grace period completed? */
  1669. if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
  1670. rdp->n_rp_gp_completed++;
  1671. return 1;
  1672. }
  1673. /* Has a new RCU grace period started? */
  1674. if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
  1675. rdp->n_rp_gp_started++;
  1676. return 1;
  1677. }
  1678. /* Has an RCU GP gone long enough to send resched IPIs &c? */
  1679. if (rcu_gp_in_progress(rsp) &&
  1680. ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
  1681. rdp->n_rp_need_fqs++;
  1682. return 1;
  1683. }
  1684. /* nothing to do */
  1685. rdp->n_rp_need_nothing++;
  1686. return 0;
  1687. }
  1688. /*
  1689. * Check to see if there is any immediate RCU-related work to be done
  1690. * by the current CPU, returning 1 if so. This function is part of the
  1691. * RCU implementation; it is -not- an exported member of the RCU API.
  1692. */
  1693. static int rcu_pending(int cpu)
  1694. {
  1695. return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
  1696. __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
  1697. rcu_preempt_pending(cpu);
  1698. }
  1699. /*
  1700. * Check to see if any future RCU-related work will need to be done
  1701. * by the current CPU, even if none need be done immediately, returning
  1702. * 1 if so.
  1703. */
  1704. static int rcu_cpu_has_callbacks(int cpu)
  1705. {
  1706. /* RCU callbacks either ready or pending? */
  1707. return per_cpu(rcu_sched_data, cpu).nxtlist ||
  1708. per_cpu(rcu_bh_data, cpu).nxtlist ||
  1709. rcu_preempt_needs_cpu(cpu);
  1710. }
  1711. static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
  1712. static atomic_t rcu_barrier_cpu_count;
  1713. static DEFINE_MUTEX(rcu_barrier_mutex);
  1714. static struct completion rcu_barrier_completion;
  1715. static void rcu_barrier_callback(struct rcu_head *notused)
  1716. {
  1717. if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  1718. complete(&rcu_barrier_completion);
  1719. }
  1720. /*
  1721. * Called with preemption disabled, and from cross-cpu IRQ context.
  1722. */
  1723. static void rcu_barrier_func(void *type)
  1724. {
  1725. int cpu = smp_processor_id();
  1726. struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
  1727. void (*call_rcu_func)(struct rcu_head *head,
  1728. void (*func)(struct rcu_head *head));
  1729. atomic_inc(&rcu_barrier_cpu_count);
  1730. call_rcu_func = type;
  1731. call_rcu_func(head, rcu_barrier_callback);
  1732. }
  1733. /*
  1734. * Orchestrate the specified type of RCU barrier, waiting for all
  1735. * RCU callbacks of the specified type to complete.
  1736. */
  1737. static void _rcu_barrier(struct rcu_state *rsp,
  1738. void (*call_rcu_func)(struct rcu_head *head,
  1739. void (*func)(struct rcu_head *head)))
  1740. {
  1741. BUG_ON(in_interrupt());
  1742. /* Take mutex to serialize concurrent rcu_barrier() requests. */
  1743. mutex_lock(&rcu_barrier_mutex);
  1744. init_completion(&rcu_barrier_completion);
  1745. /*
  1746. * Initialize rcu_barrier_cpu_count to 1, then invoke
  1747. * rcu_barrier_func() on each CPU, so that each CPU also has
  1748. * incremented rcu_barrier_cpu_count. Only then is it safe to
  1749. * decrement rcu_barrier_cpu_count -- otherwise the first CPU
  1750. * might complete its grace period before all of the other CPUs
  1751. * did their increment, causing this function to return too
  1752. * early. Note that on_each_cpu() disables irqs, which prevents
  1753. * any CPUs from coming online or going offline until each online
  1754. * CPU has queued its RCU-barrier callback.
  1755. */
  1756. atomic_set(&rcu_barrier_cpu_count, 1);
  1757. on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
  1758. if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  1759. complete(&rcu_barrier_completion);
  1760. wait_for_completion(&rcu_barrier_completion);
  1761. mutex_unlock(&rcu_barrier_mutex);
  1762. }
  1763. /**
  1764. * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  1765. */
  1766. void rcu_barrier_bh(void)
  1767. {
  1768. _rcu_barrier(&rcu_bh_state, call_rcu_bh);
  1769. }
  1770. EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  1771. /**
  1772. * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
  1773. */
  1774. void rcu_barrier_sched(void)
  1775. {
  1776. _rcu_barrier(&rcu_sched_state, call_rcu_sched);
  1777. }
  1778. EXPORT_SYMBOL_GPL(rcu_barrier_sched);
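/*
 * Illustrative use (a sketch; foo_exit() and unregister_foo() are
 * hypothetical): a module that posts callbacks via call_rcu_sched()
 * must wait for them before its callback functions disappear, e.g.:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_foo();	-- stop posting new callbacks
 *		rcu_barrier_sched();	-- wait for the ones already posted
 *	}
 */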
  1779. /*
  1780. * Do boot-time initialization of a CPU's per-CPU RCU data.
  1781. */
  1782. static void __init
  1783. rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  1784. {
  1785. unsigned long flags;
  1786. int i;
  1787. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  1788. struct rcu_node *rnp = rcu_get_root(rsp);
  1789. /* Set up local state, ensuring consistent view of global state. */
  1790. raw_spin_lock_irqsave(&rnp->lock, flags);
  1791. rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
  1792. rdp->nxtlist = NULL;
  1793. for (i = 0; i < RCU_NEXT_SIZE; i++)
  1794. rdp->nxttail[i] = &rdp->nxtlist;
  1795. rdp->qlen = 0;
  1796. rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
  1797. WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
  1798. WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
  1799. rdp->cpu = cpu;
  1800. rdp->rsp = rsp;
  1801. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1802. }
  1803. /*
  1804. * Initialize a CPU's per-CPU RCU data. Note that only one online or
  1805. * offline event can be happening at a given time. Note also that we
  1806. * can accept some slop in the rsp->completed access due to the fact
  1807. * that this CPU cannot possibly have any RCU callbacks in flight yet.
  1808. */
  1809. static void __cpuinit
  1810. rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
  1811. {
  1812. unsigned long flags;
  1813. unsigned long mask;
  1814. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  1815. struct rcu_node *rnp = rcu_get_root(rsp);
  1816. /* Set up local state, ensuring consistent view of global state. */
  1817. raw_spin_lock_irqsave(&rnp->lock, flags);
  1818. rdp->beenonline = 1; /* We have now been online. */
  1819. rdp->preemptible = preemptible;
  1820. rdp->qlen_last_fqs_check = 0;
  1821. rdp->n_force_qs_snap = rsp->n_force_qs;
  1822. rdp->blimit = blimit;
  1823. WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
  1824. WARN_ON_ONCE((atomic_read(&rdp->dynticks->dynticks) & 0x1) != 1);
  1825. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1826. /*
  1827. * A new grace period might start here. If so, we won't be part
  1828. * of it, but that is OK, as we are currently in a quiescent state.
  1829. */
  1830. /* Exclude any attempts to start a new GP on large systems. */
  1831. raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
  1832. /* Add CPU to rcu_node bitmasks. */
  1833. rnp = rdp->mynode;
  1834. mask = rdp->grpmask;
  1835. do {
  1836. /* Exclude any attempts to start a new GP on small systems. */
  1837. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  1838. rnp->qsmaskinit |= mask;
  1839. mask = rnp->grpmask;
  1840. if (rnp == rdp->mynode) {
  1841. /*
  1842. * If there is a grace period in progress, we will
  1843. * set up to wait for it next time we run the
  1844. * RCU core code.
  1845. */
  1846. rdp->gpnum = rnp->completed;
  1847. rdp->completed = rnp->completed;
  1848. rdp->passed_quiesce = 0;
  1849. rdp->qs_pending = 0;
  1850. rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
  1851. trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
  1852. }
  1853. raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
  1854. rnp = rnp->parent;
  1855. } while (rnp != NULL && !(rnp->qsmaskinit & mask));
  1856. raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
  1857. }
  1858. static void __cpuinit rcu_prepare_cpu(int cpu)
  1859. {
  1860. rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
  1861. rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
  1862. rcu_preempt_init_percpu_data(cpu);
  1863. }
  1864. /*
  1865. * Handle CPU online/offline notification events.
  1866. */
  1867. static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
  1868. unsigned long action, void *hcpu)
  1869. {
  1870. long cpu = (long)hcpu;
  1871. struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
  1872. struct rcu_node *rnp = rdp->mynode;
  1873. trace_rcu_utilization("Start CPU hotplug");
  1874. switch (action) {
  1875. case CPU_UP_PREPARE:
  1876. case CPU_UP_PREPARE_FROZEN:
  1877. rcu_prepare_cpu(cpu);
  1878. rcu_prepare_kthreads(cpu);
  1879. break;
  1880. case CPU_ONLINE:
  1881. case CPU_DOWN_FAILED:
  1882. rcu_node_kthread_setaffinity(rnp, -1);
  1883. rcu_cpu_kthread_setrt(cpu, 1);
  1884. break;
  1885. case CPU_DOWN_PREPARE:
  1886. rcu_node_kthread_setaffinity(rnp, cpu);
  1887. rcu_cpu_kthread_setrt(cpu, 0);
  1888. break;
  1889. case CPU_DYING:
  1890. case CPU_DYING_FROZEN:
  1891. /*
  1892. * The whole machine is "stopped" except this CPU, so we can
  1893. * touch any data without introducing corruption. We send the
  1894. * dying CPU's callbacks to an arbitrarily chosen online CPU.
  1895. */
  1896. rcu_send_cbs_to_online(&rcu_bh_state);
  1897. rcu_send_cbs_to_online(&rcu_sched_state);
  1898. rcu_preempt_send_cbs_to_online();
  1899. break;
  1900. case CPU_DEAD:
  1901. case CPU_DEAD_FROZEN:
  1902. case CPU_UP_CANCELED:
  1903. case CPU_UP_CANCELED_FROZEN:
  1904. rcu_offline_cpu(cpu);
  1905. break;
  1906. default:
  1907. break;
  1908. }
  1909. trace_rcu_utilization("End CPU hotplug");
  1910. return NOTIFY_OK;
  1911. }
  1912. /*
  1913. * This function is invoked towards the end of the scheduler's initialization
  1914. * process. Before this is called, the idle task might contain
  1915. * RCU read-side critical sections (during which time, this idle
  1916. * task is booting the system). After this function is called, the
  1917. * idle tasks are prohibited from containing RCU read-side critical
  1918. * sections. This function also enables RCU lockdep checking.
  1919. */
  1920. void rcu_scheduler_starting(void)
  1921. {
  1922. WARN_ON(num_online_cpus() != 1);
  1923. WARN_ON(nr_context_switches() > 0);
  1924. rcu_scheduler_active = 1;
  1925. }
  1926. /*
  1927. * Compute the per-level fanout, either using the exact fanout specified
  1928. * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
  1929. */
  1930. #ifdef CONFIG_RCU_FANOUT_EXACT
  1931. static void __init rcu_init_levelspread(struct rcu_state *rsp)
  1932. {
  1933. int i;
  1934. for (i = NUM_RCU_LVLS - 1; i > 0; i--)
  1935. rsp->levelspread[i] = CONFIG_RCU_FANOUT;
  1936. rsp->levelspread[0] = RCU_FANOUT_LEAF;
  1937. }
  1938. #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
  1939. static void __init rcu_init_levelspread(struct rcu_state *rsp)
  1940. {
  1941. int ccur;
  1942. int cprv;
  1943. int i;
  1944. cprv = NR_CPUS;
  1945. for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
  1946. ccur = rsp->levelcnt[i];
  1947. rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
  1948. cprv = ccur;
  1949. }
  1950. }
  1951. #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
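/*
 * Worked example (illustrative): with NR_CPUS == 64 and a two-level
 * tree whose levelcnt[] is { 1, 4 } (one root, four leaves), the loop
 * above runs leaf-first: levelspread[1] = (64 + 4 - 1) / 4 = 16 CPUs
 * per leaf, then levelspread[0] = (4 + 1 - 1) / 1 = 4 leaf nodes under
 * the root.  Balancing the spread this way keeps contention on any
 * single rcu_node lock roughly even.
 */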
  1952. /*
  1953. * Helper function for rcu_init() that initializes one rcu_state structure.
  1954. */
  1955. static void __init rcu_init_one(struct rcu_state *rsp,
  1956. struct rcu_data __percpu *rda)
  1957. {
  1958. static char *buf[] = { "rcu_node_level_0",
  1959. "rcu_node_level_1",
  1960. "rcu_node_level_2",
  1961. "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */
  1962. int cpustride = 1;
  1963. int i;
  1964. int j;
  1965. struct rcu_node *rnp;
  1966. BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
  1967. /* Initialize the level-tracking arrays. */
  1968. for (i = 1; i < NUM_RCU_LVLS; i++)
  1969. rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
  1970. rcu_init_levelspread(rsp);
  1971. /* Initialize the elements themselves, starting from the leaves. */
  1972. for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
  1973. cpustride *= rsp->levelspread[i];
  1974. rnp = rsp->level[i];
  1975. for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
  1976. raw_spin_lock_init(&rnp->lock);
  1977. lockdep_set_class_and_name(&rnp->lock,
  1978. &rcu_node_class[i], buf[i]);
  1979. rnp->gpnum = 0;
  1980. rnp->qsmask = 0;
  1981. rnp->qsmaskinit = 0;
  1982. rnp->grplo = j * cpustride;
  1983. rnp->grphi = (j + 1) * cpustride - 1;
  1984. if (rnp->grphi >= NR_CPUS)
  1985. rnp->grphi = NR_CPUS - 1;
  1986. if (i == 0) {
  1987. rnp->grpnum = 0;
  1988. rnp->grpmask = 0;
  1989. rnp->parent = NULL;
  1990. } else {
  1991. rnp->grpnum = j % rsp->levelspread[i - 1];
  1992. rnp->grpmask = 1UL << rnp->grpnum;
  1993. rnp->parent = rsp->level[i - 1] +
  1994. j / rsp->levelspread[i - 1];
  1995. }
  1996. rnp->level = i;
  1997. INIT_LIST_HEAD(&rnp->blkd_tasks);
  1998. }
  1999. }
  2000. rsp->rda = rda;
  2001. rnp = rsp->level[NUM_RCU_LVLS - 1];
  2002. for_each_possible_cpu(i) {
  2003. while (i > rnp->grphi)
  2004. rnp++;
  2005. per_cpu_ptr(rsp->rda, i)->mynode = rnp;
  2006. rcu_boot_init_percpu_data(i, rsp);
  2007. }
  2008. }
  2009. void __init rcu_init(void)
  2010. {
  2011. int cpu;
  2012. rcu_bootup_announce();
  2013. rcu_init_one(&rcu_sched_state, &rcu_sched_data);
  2014. rcu_init_one(&rcu_bh_state, &rcu_bh_data);
  2015. __rcu_init_preempt();
  2016. open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
  2017. /*
  2018. * We don't need protection against CPU-hotplug here because
  2019. * this is called early in boot, before either interrupts
  2020. * or the scheduler are operational.
  2021. */
  2022. cpu_notifier(rcu_cpu_notify, 0);
  2023. for_each_online_cpu(cpu)
  2024. rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
  2025. check_cpu_stall_init();
  2026. }
  2027. #include "rcutree_plugin.h"