rcutree.c

  1. /*
  2. * Read-Copy Update mechanism for mutual exclusion
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright IBM Corporation, 2008
  19. *
  20. * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  21. * Manfred Spraul <manfred@colorfullife.com>
  22. * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
  23. *
  24. * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  25. * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  26. *
  27. * For detailed explanation of Read-Copy Update mechanism see -
  28. * Documentation/RCU
  29. */
  30. #include <linux/types.h>
  31. #include <linux/kernel.h>
  32. #include <linux/init.h>
  33. #include <linux/spinlock.h>
  34. #include <linux/smp.h>
  35. #include <linux/rcupdate.h>
  36. #include <linux/interrupt.h>
  37. #include <linux/sched.h>
  38. #include <linux/nmi.h>
  39. #include <linux/atomic.h>
  40. #include <linux/bitops.h>
  41. #include <linux/module.h>
  42. #include <linux/completion.h>
  43. #include <linux/moduleparam.h>
  44. #include <linux/percpu.h>
  45. #include <linux/notifier.h>
  46. #include <linux/cpu.h>
  47. #include <linux/mutex.h>
  48. #include <linux/time.h>
  49. #include <linux/kernel_stat.h>
  50. #include <linux/wait.h>
  51. #include <linux/kthread.h>
  52. #include <linux/prefetch.h>
  53. #include "rcutree.h"
  54. #include <trace/events/rcu.h>
  55. #include "rcu.h"
  56. /* Data structures. */
  57. static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
  58. #define RCU_STATE_INITIALIZER(structname) { \
  59. .level = { &structname##_state.node[0] }, \
  60. .levelcnt = { \
  61. NUM_RCU_LVL_0, /* root of hierarchy. */ \
  62. NUM_RCU_LVL_1, \
  63. NUM_RCU_LVL_2, \
  64. NUM_RCU_LVL_3, \
  65. NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
  66. }, \
  67. .signaled = RCU_GP_IDLE, \
  68. .gpnum = -300, \
  69. .completed = -300, \
  70. .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
  71. .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
  72. .n_force_qs = 0, \
  73. .n_force_qs_ngp = 0, \
  74. .name = #structname, \
  75. }
  76. struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
  77. DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
  78. struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
  79. DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
  80. static struct rcu_state *rcu_state;
  81. /*
  82. * The rcu_scheduler_active variable transitions from zero to one just
  83. * before the first task is spawned. So when this variable is zero, RCU
  84. * can assume that there is but one task, allowing RCU to (for example)
  85. * optimize synchronize_sched() to a simple barrier(). When this variable
  86. * is one, RCU must actually do all the hard work required to detect real
  87. * grace periods. This variable is also used to suppress boot-time false
  88. * positives from lockdep-RCU error checking.
  89. */
  90. int rcu_scheduler_active __read_mostly;
  91. EXPORT_SYMBOL_GPL(rcu_scheduler_active);
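/*
 * Illustrative updater-side usage (not part of this file) of the
 * RCU-sched flavor implemented below; the pointer 'gp' and 'struct foo'
 * are hypothetical. While rcu_scheduler_active is still zero, the
 * synchronize_sched() here can degenerate to a simple barrier() because
 * only one task exists.
 *
 *	struct foo *old, *new = kmalloc(sizeof(*new), GFP_KERNEL);
 *
 *	new->field = 1;
 *	old = gp;
 *	rcu_assign_pointer(gp, new);	-- publish the new version
 *	synchronize_sched();		-- wait for readers to finish
 *	kfree(old);			-- now safe to free the old version
 */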
  92. /*
  93. * The rcu_scheduler_fully_active variable transitions from zero to one
  94. * during the early_initcall() processing, which is after the scheduler
  95. * is capable of creating new tasks. So RCU processing (for example,
  96. * creating tasks for RCU priority boosting) must be delayed until after
  97. * rcu_scheduler_fully_active transitions from zero to one. We also
  98. * currently delay invocation of any RCU callbacks until after this point.
  99. *
  100. * It might later prove better for people registering RCU callbacks during
  101. * early boot to take responsibility for these callbacks, but one step at
  102. * a time.
  103. */
  104. static int rcu_scheduler_fully_active __read_mostly;
  105. #ifdef CONFIG_RCU_BOOST
  106. /*
  107. * Control variables for per-CPU and per-rcu_node kthreads. These
  108. * handle all flavors of RCU.
  109. */
  110. static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
  111. DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
  112. DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
  113. DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
  114. DEFINE_PER_CPU(char, rcu_cpu_has_work);
  115. #endif /* #ifdef CONFIG_RCU_BOOST */
  116. static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
  117. static void invoke_rcu_core(void);
  118. static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
  119. #define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
  120. /*
  121. * Track the rcutorture test sequence number and the update version
  122. * number within a given test. The rcutorture_testseq is incremented
  123. * on every rcutorture module load and unload, so has an odd value
  124. * when a test is running. The rcutorture_vernum is set to zero
  125. * when rcutorture starts and is incremented on each rcutorture update.
  126. * These variables enable correlating rcutorture output with the
  127. * RCU tracing information.
  128. */
  129. unsigned long rcutorture_testseq;
  130. unsigned long rcutorture_vernum;
  131. /*
  132. * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
  133. * permit this function to be invoked without holding the root rcu_node
  134. * structure's ->lock, but of course results can be subject to change.
  135. */
  136. static int rcu_gp_in_progress(struct rcu_state *rsp)
  137. {
  138. return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
  139. }
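/*
 * Worked example (illustrative, using the initializer values above):
 * ->gpnum advances when a grace period starts and ->completed catches up
 * when it ends, so the two differ exactly while a grace period runs.
 *
 *	gpnum == -300, completed == -300	-- RCU idle, no GP in progress
 *	gpnum == -299, completed == -300	-- grace period -299 in progress
 *	gpnum == -299, completed == -299	-- grace period -299 has ended
 */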
  140. /*
  141. * Note a quiescent state. Because we do not need to know
  142. * how many quiescent states passed, just if there was at least
  143. * one since the start of the grace period, this just sets a flag.
  144. * The caller must have disabled preemption.
  145. */
  146. void rcu_sched_qs(int cpu)
  147. {
  148. struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
  149. rdp->passed_quiesce_gpnum = rdp->gpnum;
  150. barrier();
  151. if (rdp->passed_quiesce == 0)
  152. trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
  153. rdp->passed_quiesce = 1;
  154. }
  155. void rcu_bh_qs(int cpu)
  156. {
  157. struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
  158. rdp->passed_quiesce_gpnum = rdp->gpnum;
  159. barrier();
  160. if (rdp->passed_quiesce == 0)
  161. trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
  162. rdp->passed_quiesce = 1;
  163. }
  164. /*
  165. * Note a context switch. This is a quiescent state for RCU-sched,
  166. * and requires special handling for preemptible RCU.
  167. * The caller must have disabled preemption.
  168. */
  169. void rcu_note_context_switch(int cpu)
  170. {
  171. trace_rcu_utilization("Start context switch");
  172. rcu_sched_qs(cpu);
  173. rcu_preempt_note_context_switch(cpu);
  174. trace_rcu_utilization("End context switch");
  175. }
  176. EXPORT_SYMBOL_GPL(rcu_note_context_switch);
  177. #ifdef CONFIG_NO_HZ
  178. DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
  179. .dynticks_nesting = 1,
  180. .dynticks = ATOMIC_INIT(1),
  181. };
  182. #endif /* #ifdef CONFIG_NO_HZ */
  183. static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
  184. static int qhimark = 10000; /* If this many pending, ignore blimit. */
  185. static int qlowmark = 100; /* Once only this many pending, use blimit. */
  186. module_param(blimit, int, 0);
  187. module_param(qhimark, int, 0);
  188. module_param(qlowmark, int, 0);
  189. int rcu_cpu_stall_suppress __read_mostly;
  190. module_param(rcu_cpu_stall_suppress, int, 0644);
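/*
 * These knobs can be set on the kernel command line, for example
 * (assuming the usual "rcutree." prefix for this file's built-in
 * module parameters):
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=200
 *
 * rcu_cpu_stall_suppress is declared with mode 0644, so it should also
 * be writable at run time under /sys/module/rcutree/parameters/.
 */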
  191. static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
  192. static int rcu_pending(int cpu);
  193. /*
  194. * Return the number of RCU-sched batches processed thus far for debug & stats.
  195. */
  196. long rcu_batches_completed_sched(void)
  197. {
  198. return rcu_sched_state.completed;
  199. }
  200. EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
  201. /*
  202. * Return the number of RCU BH batches processed thus far for debug & stats.
  203. */
  204. long rcu_batches_completed_bh(void)
  205. {
  206. return rcu_bh_state.completed;
  207. }
  208. EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
  209. /*
  210. * Force a quiescent state for RCU BH.
  211. */
  212. void rcu_bh_force_quiescent_state(void)
  213. {
  214. force_quiescent_state(&rcu_bh_state, 0);
  215. }
  216. EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  217. /*
  218. * Record the number of times rcutorture tests have been initiated and
  219. * terminated. This information allows the debugfs tracing stats to be
  220. * correlated to the rcutorture messages, even when the rcutorture module
  221. * is being repeatedly loaded and unloaded. In other words, we cannot
  222. * store this state in rcutorture itself.
  223. */
  224. void rcutorture_record_test_transition(void)
  225. {
  226. rcutorture_testseq++;
  227. rcutorture_vernum = 0;
  228. }
  229. EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
  230. /*
  231. * Record the number of writer passes through the current rcutorture test.
  232. * This is also used to correlate debugfs tracing stats with the rcutorture
  233. * messages.
  234. */
  235. void rcutorture_record_progress(unsigned long vernum)
  236. {
  237. rcutorture_vernum++;
  238. }
  239. EXPORT_SYMBOL_GPL(rcutorture_record_progress);
  240. /*
  241. * Force a quiescent state for RCU-sched.
  242. */
  243. void rcu_sched_force_quiescent_state(void)
  244. {
  245. force_quiescent_state(&rcu_sched_state, 0);
  246. }
  247. EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  248. /*
  249. * Does the CPU have callbacks ready to be invoked?
  250. */
  251. static int
  252. cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
  253. {
  254. return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
  255. }
  256. /*
  257. * Does the current CPU require a not-yet-scheduled grace period?
  258. */
  259. static int
  260. cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  261. {
  262. return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
  263. }
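/*
 * Illustrative sketch of the segmented callback list that the two
 * helpers above inspect: rdp->nxtlist is a single linked list divided
 * into four segments by the rdp->nxttail[] pointers (names per rcutree.h):
 *
 *	RCU_DONE_TAIL		callbacks whose grace period has ended
 *	RCU_WAIT_TAIL		callbacks waiting on the current grace period
 *	RCU_NEXT_READY_TAIL	callbacks waiting on the next grace period
 *	RCU_NEXT_TAIL		callbacks not yet associated with any GP
 *
 * cpu_has_callbacks_ready_to_invoke() is true when the first segment is
 * non-empty, and cpu_needs_another_gp() is true when a later segment is
 * non-empty but no grace period is currently in progress.
 */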
  264. /*
  265. * Return the root node of the specified rcu_state structure.
  266. */
  267. static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
  268. {
  269. return &rsp->node[0];
  270. }
  271. #ifdef CONFIG_SMP
  272. /*
  273. * If the specified CPU is offline, tell the caller that it is in
  274. * a quiescent state. Otherwise, whack it with a reschedule IPI.
  275. * Grace periods can end up waiting on an offline CPU when that
  276. * CPU is in the process of coming online -- it will be added to the
  277. * rcu_node bitmasks before it actually makes it online. The same thing
  278. * can happen while a CPU is in the process of going offline. Because this
  279. * race is quite rare, we check for it after detecting that the grace
  280. * period has been delayed rather than checking each and every CPU
  281. * each and every time we start a new grace period.
  282. */
  283. static int rcu_implicit_offline_qs(struct rcu_data *rdp)
  284. {
  285. /*
  286. * If the CPU is offline, it is in a quiescent state. We can
  287. * trust its state not to change because interrupts are disabled.
  288. */
  289. if (cpu_is_offline(rdp->cpu)) {
  290. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
  291. rdp->offline_fqs++;
  292. return 1;
  293. }
  294. /* If preemptible RCU, no point in sending reschedule IPI. */
  295. if (rdp->preemptible)
  296. return 0;
  297. /* The CPU is online, so send it a reschedule IPI. */
  298. if (rdp->cpu != smp_processor_id())
  299. smp_send_reschedule(rdp->cpu);
  300. else
  301. set_need_resched();
  302. rdp->resched_ipi++;
  303. return 0;
  304. }
  305. #endif /* #ifdef CONFIG_SMP */
  306. #ifdef CONFIG_NO_HZ
  307. /**
  308. * rcu_enter_nohz - inform RCU that current CPU is entering nohz
  309. *
  310. * Enter nohz mode, in other words, -leave- the mode in which RCU
  311. * read-side critical sections can occur. (Though RCU read-side
  312. * critical sections can occur in irq handlers in nohz mode, a possibility
  313. * handled by rcu_irq_enter() and rcu_irq_exit()).
  314. */
  315. void rcu_enter_nohz(void)
  316. {
  317. unsigned long flags;
  318. struct rcu_dynticks *rdtp;
  319. local_irq_save(flags);
  320. rdtp = &__get_cpu_var(rcu_dynticks);
  321. if (--rdtp->dynticks_nesting) {
  322. local_irq_restore(flags);
  323. return;
  324. }
  325. trace_rcu_dyntick("Start");
  326. /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
  327. smp_mb__before_atomic_inc(); /* See above. */
  328. atomic_inc(&rdtp->dynticks);
  329. smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
  330. WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
  331. local_irq_restore(flags);
  332. }
  333. /**
  334. * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
  335. *
  336. * Exit nohz mode, in other words, -enter- the mode in which RCU
  337. * read-side critical sections normally occur.
  338. */
  339. void rcu_exit_nohz(void)
  340. {
  341. unsigned long flags;
  342. struct rcu_dynticks *rdtp;
  343. local_irq_save(flags);
  344. rdtp = &__get_cpu_var(rcu_dynticks);
  345. if (rdtp->dynticks_nesting++) {
  346. local_irq_restore(flags);
  347. return;
  348. }
  349. smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
  350. atomic_inc(&rdtp->dynticks);
  351. /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
  352. smp_mb__after_atomic_inc(); /* See above. */
  353. WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
  354. trace_rcu_dyntick("End");
  355. local_irq_restore(flags);
  356. }
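/*
 * Illustrative trace of the ->dynticks counter managed by the two
 * functions above: the value is even while the CPU is in dynticks-idle
 * mode and odd otherwise, and each transition increments it once.
 *
 *	initial state:		dynticks == 1	-- non-idle (odd)
 *	rcu_enter_nohz():	dynticks == 2	-- idle (even)
 *	rcu_exit_nohz():	dynticks == 3	-- non-idle again (odd)
 *
 * rcu_implicit_dynticks_qs() below treats an even current value, or an
 * increase of at least two over the snapshot, as proof that the CPU
 * passed through an extended quiescent state.
 */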
  357. /**
  358. * rcu_nmi_enter - inform RCU of entry to NMI context
  359. *
  360. * If the CPU was idle with dynamic ticks active, and there is no
  361. * irq handler running, this updates rdtp->dynticks_nmi to let the
  362. * RCU grace-period handling know that the CPU is active.
  363. */
  364. void rcu_nmi_enter(void)
  365. {
  366. struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  367. if (rdtp->dynticks_nmi_nesting == 0 &&
  368. (atomic_read(&rdtp->dynticks) & 0x1))
  369. return;
  370. rdtp->dynticks_nmi_nesting++;
  371. smp_mb__before_atomic_inc(); /* Force delay from prior write. */
  372. atomic_inc(&rdtp->dynticks);
  373. /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
  374. smp_mb__after_atomic_inc(); /* See above. */
  375. WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
  376. }
  377. /**
  378. * rcu_nmi_exit - inform RCU of exit from NMI context
  379. *
  380. * If the CPU was idle with dynamic ticks active, and there is no
  381. * irq handler running, this updates rdtp->dynticks_nmi to let the
  382. * RCU grace-period handling know that the CPU is no longer active.
  383. */
  384. void rcu_nmi_exit(void)
  385. {
  386. struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  387. if (rdtp->dynticks_nmi_nesting == 0 ||
  388. --rdtp->dynticks_nmi_nesting != 0)
  389. return;
  390. /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
  391. smp_mb__before_atomic_inc(); /* See above. */
  392. atomic_inc(&rdtp->dynticks);
  393. smp_mb__after_atomic_inc(); /* Force delay to next write. */
  394. WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
  395. }
  396. /**
  397. * rcu_irq_enter - inform RCU of entry to hard irq context
  398. *
  399. * If the CPU was idle with dynamic ticks active, this updates the
  400. * rdtp->dynticks to let the RCU handling know that the CPU is active.
  401. */
  402. void rcu_irq_enter(void)
  403. {
  404. rcu_exit_nohz();
  405. }
  406. /**
  407. * rcu_irq_exit - inform RCU of exit from hard irq context
  408. *
  409. * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
  410. * to let the RCU handling know that the CPU is going back to idle
  411. * with no ticks.
  412. */
  413. void rcu_irq_exit(void)
  414. {
  415. rcu_enter_nohz();
  416. }
  417. #ifdef CONFIG_SMP
  418. /*
  419. * Snapshot the specified CPU's dynticks counter so that we can later
  420. * credit them with an implicit quiescent state. Return 1 if this CPU
  421. * is in dynticks idle mode, which is an extended quiescent state.
  422. */
  423. static int dyntick_save_progress_counter(struct rcu_data *rdp)
  424. {
  425. rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
  426. return 0;
  427. }
  428. /*
  429. * Return true if the specified CPU has passed through a quiescent
  430. * state by virtue of being in or having passed through a dynticks
  431. * idle state since the last call to dyntick_save_progress_counter()
  432. * for this same CPU.
  433. */
  434. static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  435. {
  436. unsigned int curr;
  437. unsigned int snap;
  438. curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
  439. snap = (unsigned int)rdp->dynticks_snap;
  440. /*
  441. * If the CPU passed through or entered a dynticks idle phase with
  442. * no active irq/NMI handlers, then we can safely pretend that the CPU
  443. * already acknowledged the request to pass through a quiescent
  444. * state. Either way, that CPU cannot possibly be in an RCU
  445. * read-side critical section that started before the beginning
  446. * of the current RCU grace period.
  447. */
  448. if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
  449. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
  450. rdp->dynticks_fqs++;
  451. return 1;
  452. }
  453. /* Go check for the CPU being offline. */
  454. return rcu_implicit_offline_qs(rdp);
  455. }
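/*
 * Worked example for the check above (values arbitrary): suppose the
 * snapshot taken by dyntick_save_progress_counter() was snap == 5, an
 * odd value, so the CPU was not idle at snapshot time.
 *
 *	curr == 5		-- no transition seen, not yet a QS
 *	curr == 6 (even)	-- CPU is idle right now: QS
 *	curr == 7 (>= snap + 2)	-- CPU went idle and came back: QS
 */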
  456. #endif /* #ifdef CONFIG_SMP */
  457. #else /* #ifdef CONFIG_NO_HZ */
  458. #ifdef CONFIG_SMP
  459. static int dyntick_save_progress_counter(struct rcu_data *rdp)
  460. {
  461. return 0;
  462. }
  463. static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  464. {
  465. return rcu_implicit_offline_qs(rdp);
  466. }
  467. #endif /* #ifdef CONFIG_SMP */
  468. #endif /* #else #ifdef CONFIG_NO_HZ */
  469. int rcu_cpu_stall_suppress __read_mostly;
  470. static void record_gp_stall_check_time(struct rcu_state *rsp)
  471. {
  472. rsp->gp_start = jiffies;
  473. rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
  474. }
  475. static void print_other_cpu_stall(struct rcu_state *rsp)
  476. {
  477. int cpu;
  478. long delta;
  479. unsigned long flags;
  480. struct rcu_node *rnp = rcu_get_root(rsp);
  481. /* Only let one CPU complain about others per time interval. */
  482. raw_spin_lock_irqsave(&rnp->lock, flags);
  483. delta = jiffies - rsp->jiffies_stall;
  484. if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
  485. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  486. return;
  487. }
  488. rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
  489. /*
  490. * Now rat on any tasks that got kicked up to the root rcu_node
  491. * due to CPU offlining.
  492. */
  493. rcu_print_task_stall(rnp);
  494. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  495. /*
  496. * OK, time to rat on our buddy...
  497. * See Documentation/RCU/stallwarn.txt for info on how to debug
  498. * RCU CPU stall warnings.
  499. */
  500. printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
  501. rsp->name);
  502. rcu_for_each_leaf_node(rsp, rnp) {
  503. raw_spin_lock_irqsave(&rnp->lock, flags);
  504. rcu_print_task_stall(rnp);
  505. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  506. if (rnp->qsmask == 0)
  507. continue;
  508. for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
  509. if (rnp->qsmask & (1UL << cpu))
  510. printk(" %d", rnp->grplo + cpu);
  511. }
  512. printk("} (detected by %d, t=%ld jiffies)\n",
  513. smp_processor_id(), (long)(jiffies - rsp->gp_start));
  514. if (!trigger_all_cpu_backtrace())
  515. dump_stack();
  516. /* If so configured, complain about tasks blocking the grace period. */
  517. rcu_print_detail_task_stall(rsp);
  518. force_quiescent_state(rsp, 0); /* Kick them all. */
  519. }
  520. static void print_cpu_stall(struct rcu_state *rsp)
  521. {
  522. unsigned long flags;
  523. struct rcu_node *rnp = rcu_get_root(rsp);
  524. /*
  525. * OK, time to rat on ourselves...
  526. * See Documentation/RCU/stallwarn.txt for info on how to debug
  527. * RCU CPU stall warnings.
  528. */
  529. printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
  530. rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
  531. if (!trigger_all_cpu_backtrace())
  532. dump_stack();
  533. raw_spin_lock_irqsave(&rnp->lock, flags);
  534. if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
  535. rsp->jiffies_stall =
  536. jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
  537. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  538. set_need_resched(); /* kick ourselves to get things going. */
  539. }
  540. static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  541. {
  542. unsigned long j;
  543. unsigned long js;
  544. struct rcu_node *rnp;
  545. if (rcu_cpu_stall_suppress)
  546. return;
  547. j = ACCESS_ONCE(jiffies);
  548. js = ACCESS_ONCE(rsp->jiffies_stall);
  549. rnp = rdp->mynode;
  550. if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
  551. /* We haven't checked in, so go dump stack. */
  552. print_cpu_stall(rsp);
  553. } else if (rcu_gp_in_progress(rsp) &&
  554. ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
  555. /* They had a few time units to dump stack, so complain. */
  556. print_other_cpu_stall(rsp);
  557. }
  558. }
  559. static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
  560. {
  561. rcu_cpu_stall_suppress = 1;
  562. return NOTIFY_DONE;
  563. }
  564. /**
  565. * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
  566. *
  567. * Set the stall-warning timeout way off into the future, thus preventing
  568. * any RCU CPU stall-warning messages from appearing in the current set of
  569. * RCU grace periods.
  570. *
  571. * The caller must disable hard irqs.
  572. */
  573. void rcu_cpu_stall_reset(void)
  574. {
  575. rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
  576. rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
  577. rcu_preempt_stall_reset();
  578. }
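/*
 * Why jiffies + ULONG_MAX / 2: the stall checks above compare against
 * ->jiffies_stall using the wraparound-safe ULONG_CMP_GE(), which
 * (assuming its usual definition in rcupdate.h) considers a deadline
 * still in the future only while it is less than ULONG_MAX / 2 ahead of
 * jiffies. Setting the deadline ULONG_MAX / 2 jiffies ahead is therefore
 * the farthest-future value that comparison can express, which is what
 * suppresses stall warnings for the current set of grace periods.
 */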
  579. static struct notifier_block rcu_panic_block = {
  580. .notifier_call = rcu_panic,
  581. };
  582. static void __init check_cpu_stall_init(void)
  583. {
  584. atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
  585. }
  586. /*
  587. * Update CPU-local rcu_data state to record the newly noticed grace period.
  588. * This is used both when we started the grace period and when we notice
  589. * that someone else started the grace period. The caller must hold the
  590. * ->lock of the leaf rcu_node structure corresponding to the current CPU,
  591. * and must have irqs disabled.
  592. */
  593. static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  594. {
  595. if (rdp->gpnum != rnp->gpnum) {
  596. /*
  597. * If the current grace period is waiting for this CPU,
  598. * set up to detect a quiescent state, otherwise don't
  599. * go looking for one.
  600. */
  601. rdp->gpnum = rnp->gpnum;
  602. trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
  603. if (rnp->qsmask & rdp->grpmask) {
  604. rdp->qs_pending = 1;
  605. rdp->passed_quiesce = 0;
  606. } else
  607. rdp->qs_pending = 0;
  608. }
  609. }
  610. static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
  611. {
  612. unsigned long flags;
  613. struct rcu_node *rnp;
  614. local_irq_save(flags);
  615. rnp = rdp->mynode;
  616. if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
  617. !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
  618. local_irq_restore(flags);
  619. return;
  620. }
  621. __note_new_gpnum(rsp, rnp, rdp);
  622. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  623. }
  624. /*
  625. * Did someone else start a new RCU grace period since we last
  626. * checked? Update local state appropriately if so. Must be called
  627. * on the CPU corresponding to rdp.
  628. */
  629. static int
  630. check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
  631. {
  632. unsigned long flags;
  633. int ret = 0;
  634. local_irq_save(flags);
  635. if (rdp->gpnum != rsp->gpnum) {
  636. note_new_gpnum(rsp, rdp);
  637. ret = 1;
  638. }
  639. local_irq_restore(flags);
  640. return ret;
  641. }
  642. /*
  643. * Advance this CPU's callbacks, but only if the current grace period
  644. * has ended. This may be called only from the CPU to whom the rdp
  645. * belongs. In addition, the corresponding leaf rcu_node structure's
  646. * ->lock must be held by the caller, with irqs disabled.
  647. */
  648. static void
  649. __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  650. {
  651. /* Did another grace period end? */
  652. if (rdp->completed != rnp->completed) {
  653. /* Advance callbacks. No harm if list empty. */
  654. rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
  655. rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
  656. rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  657. /* Remember that we saw this grace-period completion. */
  658. rdp->completed = rnp->completed;
  659. trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
  660. /*
  661. * If we were in an extended quiescent state, we may have
  662. * missed some grace periods that other CPUs handled on
  663. * our behalf. Catch up with this state to avoid noting
  664. * spurious new grace periods. If another grace period
  665. * has started, then rnp->gpnum will have advanced, so
  666. * we will detect this later on.
  667. */
  668. if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
  669. rdp->gpnum = rdp->completed;
  670. /*
  671. * If RCU does not need a quiescent state from this CPU,
  672. * then make sure that this CPU doesn't go looking for one.
  673. */
  674. if ((rnp->qsmask & rdp->grpmask) == 0)
  675. rdp->qs_pending = 0;
  676. }
  677. }
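/*
 * Illustrative effect of the tail-pointer shuffling above when a grace
 * period completes; no callbacks move in memory, only the nxttail[]
 * segment boundaries change:
 *
 *	done		<- done + waiting	(now ready to invoke)
 *	waiting		<- next-ready
 *	next-ready	<- next
 *	next		<- (empty)
 */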
  678. /*
  679. * Advance this CPU's callbacks, but only if the current grace period
  680. * has ended. This may be called only from the CPU to whom the rdp
  681. * belongs.
  682. */
  683. static void
  684. rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  685. {
  686. unsigned long flags;
  687. struct rcu_node *rnp;
  688. local_irq_save(flags);
  689. rnp = rdp->mynode;
  690. if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
  691. !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
  692. local_irq_restore(flags);
  693. return;
  694. }
  695. __rcu_process_gp_end(rsp, rnp, rdp);
  696. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  697. }
  698. /*
  699. * Do per-CPU grace-period initialization for running CPU. The caller
  700. * must hold the lock of the leaf rcu_node structure corresponding to
  701. * this CPU.
  702. */
  703. static void
  704. rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  705. {
  706. /* Prior grace period ended, so advance callbacks for current CPU. */
  707. __rcu_process_gp_end(rsp, rnp, rdp);
  708. /*
  709. * Because this CPU just now started the new grace period, we know
  710. * that all of its callbacks will be covered by this upcoming grace
  711. * period, even the ones that were registered arbitrarily recently.
  712. * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
  713. *
  714. * Other CPUs cannot be sure exactly when the grace period started.
  715. * Therefore, their recently registered callbacks must pass through
  716. * an additional RCU_NEXT_READY stage, so that they will be handled
  717. * by the next RCU grace period.
  718. */
  719. rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  720. rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  721. /* Set state so that this CPU will detect the next quiescent state. */
  722. __note_new_gpnum(rsp, rnp, rdp);
  723. }
  724. /*
  725. * Start a new RCU grace period if warranted, re-initializing the hierarchy
  726. * in preparation for detecting the next grace period. The caller must hold
  727. * the root node's ->lock, which is released before return. Hard irqs must
  728. * be disabled.
  729. */
  730. static void
  731. rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
  732. __releases(rcu_get_root(rsp)->lock)
  733. {
  734. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  735. struct rcu_node *rnp = rcu_get_root(rsp);
  736. if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
  737. if (cpu_needs_another_gp(rsp, rdp))
  738. rsp->fqs_need_gp = 1;
  739. if (rnp->completed == rsp->completed) {
  740. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  741. return;
  742. }
  743. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  744. /*
  745. * Propagate new ->completed value to rcu_node structures
  746. * so that other CPUs don't have to wait until the start
  747. * of the next grace period to process their callbacks.
  748. */
  749. rcu_for_each_node_breadth_first(rsp, rnp) {
  750. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  751. rnp->completed = rsp->completed;
  752. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  753. }
  754. local_irq_restore(flags);
  755. return;
  756. }
  757. /* Advance to a new grace period and initialize state. */
  758. rsp->gpnum++;
  759. trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
  760. WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
  761. rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
  762. rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
  763. record_gp_stall_check_time(rsp);
  764. /* Special-case the common single-level case. */
  765. if (NUM_RCU_NODES == 1) {
  766. rcu_preempt_check_blocked_tasks(rnp);
  767. rnp->qsmask = rnp->qsmaskinit;
  768. rnp->gpnum = rsp->gpnum;
  769. rnp->completed = rsp->completed;
  770. rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
  771. rcu_start_gp_per_cpu(rsp, rnp, rdp);
  772. rcu_preempt_boost_start_gp(rnp);
  773. trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  774. rnp->level, rnp->grplo,
  775. rnp->grphi, rnp->qsmask);
  776. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  777. return;
  778. }
  779. raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */
  780. /* Exclude any concurrent CPU-hotplug operations. */
  781. raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
  782. /*
  783. * Set the quiescent-state-needed bits in all the rcu_node
  784. * structures for all currently online CPUs in breadth-first
  785. * order, starting from the root rcu_node structure. This
  786. * operation relies on the layout of the hierarchy within the
  787. * rsp->node[] array. Note that other CPUs will access only
  788. * the leaves of the hierarchy, which still indicate that no
  789. * grace period is in progress, at least until the corresponding
  790. * leaf node has been initialized. In addition, we have excluded
  791. * CPU-hotplug operations.
  792. *
  793. * Note that the grace period cannot complete until we finish
  794. * the initialization process, as there will be at least one
  795. * qsmask bit set in the root node until that time, namely the
  796. * one corresponding to this CPU, due to the fact that we have
  797. * irqs disabled.
  798. */
  799. rcu_for_each_node_breadth_first(rsp, rnp) {
  800. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  801. rcu_preempt_check_blocked_tasks(rnp);
  802. rnp->qsmask = rnp->qsmaskinit;
  803. rnp->gpnum = rsp->gpnum;
  804. rnp->completed = rsp->completed;
  805. if (rnp == rdp->mynode)
  806. rcu_start_gp_per_cpu(rsp, rnp, rdp);
  807. rcu_preempt_boost_start_gp(rnp);
  808. trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  809. rnp->level, rnp->grplo,
  810. rnp->grphi, rnp->qsmask);
  811. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  812. }
  813. rnp = rcu_get_root(rsp);
  814. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  815. rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
  816. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  817. raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
  818. }
  819. /*
  820. * Report a full set of quiescent states to the specified rcu_state
  821. * data structure. This involves cleaning up after the prior grace
  822. * period and letting rcu_start_gp() start up the next grace period
  823. * if one is needed. Note that the caller must hold rnp->lock, as
  824. * required by rcu_start_gp(), which will release it.
  825. */
  826. static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  827. __releases(rcu_get_root(rsp)->lock)
  828. {
  829. unsigned long gp_duration;
  830. WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
  831. /*
  832. * Ensure that all grace-period and pre-grace-period activity
  833. * is seen before the assignment to rsp->completed.
  834. */
  835. smp_mb(); /* See above block comment. */
  836. gp_duration = jiffies - rsp->gp_start;
  837. if (gp_duration > rsp->gp_max)
  838. rsp->gp_max = gp_duration;
  839. rsp->completed = rsp->gpnum;
  840. trace_rcu_grace_period(rsp->name, rsp->completed, "end");
  841. rsp->signaled = RCU_GP_IDLE;
  842. rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
  843. }
  844. /*
  845. * Similar to rcu_report_qs_rdp(), for which it is a helper function.
  846. * Allows quiescent states for a group of CPUs to be reported at one go
  847. * to the specified rcu_node structure, though all the CPUs in the group
  848. * must be represented by the same rcu_node structure (which need not be
  849. * a leaf rcu_node structure, though it often will be). That structure's
  850. * lock must be held upon entry, and it is released before return.
  851. */
  852. static void
  853. rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  854. struct rcu_node *rnp, unsigned long flags)
  855. __releases(rnp->lock)
  856. {
  857. struct rcu_node *rnp_c;
  858. /* Walk up the rcu_node hierarchy. */
  859. for (;;) {
  860. if (!(rnp->qsmask & mask)) {
  861. /* Our bit has already been cleared, so done. */
  862. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  863. return;
  864. }
  865. rnp->qsmask &= ~mask;
  866. trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
  867. mask, rnp->qsmask, rnp->level,
  868. rnp->grplo, rnp->grphi,
  869. !!rnp->gp_tasks);
  870. if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
  871. /* Other bits still set at this level, so done. */
  872. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  873. return;
  874. }
  875. mask = rnp->grpmask;
  876. if (rnp->parent == NULL) {
  877. /* No more levels. Exit loop holding root lock. */
  878. break;
  879. }
  880. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  881. rnp_c = rnp;
  882. rnp = rnp->parent;
  883. raw_spin_lock_irqsave(&rnp->lock, flags);
  884. WARN_ON_ONCE(rnp_c->qsmask);
  885. }
  886. /*
  887. * Get here if we are the last CPU to pass through a quiescent
  888. * state for this grace period. Invoke rcu_report_qs_rsp()
  889. * to clean up and start the next grace period if one is needed.
  890. */
  891. rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
  892. }
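/*
 * Illustrative walk-up for the function above (two-level hierarchy,
 * mask values arbitrary): a CPU is the last in its leaf rcu_node to
 * report a quiescent state for this grace period.
 *
 *	leaf rnp:	qsmask 0x08 -> 0x00	-- our bit cleared, none
 *						   left, so go to the parent
 *	root rnp:	qsmask 0x02 -> 0x00	-- last leaf reported, so
 *						   rcu_report_qs_rsp() runs
 *
 * Had any other bit still been set at either level, the loop would have
 * released that node's ->lock and returned instead.
 */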
  893. /*
  894. * Record a quiescent state for the specified CPU to that CPU's rcu_data
  895. * structure. This must be either called from the specified CPU, or
  896. * called when the specified CPU is known to be offline (and when it is
  897. * also known that no other CPU is concurrently trying to help the offline
  898. * CPU). The lastgp argument is used to make sure we are still in the
  899. * grace period of interest. We don't want to end the current grace period
  900. * based on quiescent states detected in an earlier grace period!
  901. */
  902. static void
  903. rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
  904. {
  905. unsigned long flags;
  906. unsigned long mask;
  907. struct rcu_node *rnp;
  908. rnp = rdp->mynode;
  909. raw_spin_lock_irqsave(&rnp->lock, flags);
  910. if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {
  911. /*
  912. * The grace period in which this quiescent state was
  913. * recorded has ended, so don't report it upwards.
  914. * We will instead need a new quiescent state that lies
  915. * within the current grace period.
  916. */
  917. rdp->passed_quiesce = 0; /* need qs for new gp. */
  918. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  919. return;
  920. }
  921. mask = rdp->grpmask;
  922. if ((rnp->qsmask & mask) == 0) {
  923. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  924. } else {
  925. rdp->qs_pending = 0;
  926. /*
  927. * This GP can't end until cpu checks in, so all of our
  928. * callbacks can be processed during the next GP.
  929. */
  930. rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  931. rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
  932. }
  933. }
  934. /*
  935. * Check to see if there is a new grace period of which this CPU
  936. * is not yet aware, and if so, set up local rcu_data state for it.
  937. * Otherwise, see if this CPU has just passed through its first
  938. * quiescent state for this grace period, and record that fact if so.
  939. */
  940. static void
  941. rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  942. {
  943. /* If there is now a new grace period, record and return. */
  944. if (check_for_new_grace_period(rsp, rdp))
  945. return;
  946. /*
  947. * Does this CPU still need to do its part for current grace period?
  948. * If no, return and let the other CPUs do their part as well.
  949. */
  950. if (!rdp->qs_pending)
  951. return;
  952. /*
  953. * Was there a quiescent state since the beginning of the grace
  954. * period? If no, then exit and wait for the next call.
  955. */
  956. if (!rdp->passed_quiesce)
  957. return;
  958. /*
  959. * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  960. * judge of that).
  961. */
  962. rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
  963. }
  964. #ifdef CONFIG_HOTPLUG_CPU
  965. /*
  966. * Move a dying CPU's RCU callbacks to online CPU's callback list.
  967. * Synchronization is not required because this function executes
  968. * in stop_machine() context.
  969. */
  970. static void rcu_send_cbs_to_online(struct rcu_state *rsp)
  971. {
  972. int i;
  973. /* current DYING CPU is cleared in the cpu_online_mask */
  974. int receive_cpu = cpumask_any(cpu_online_mask);
  975. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  976. struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);
  977. if (rdp->nxtlist == NULL)
  978. return; /* irqs disabled, so comparison is stable. */
  979. *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
  980. receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  981. receive_rdp->qlen += rdp->qlen;
  982. receive_rdp->n_cbs_adopted += rdp->qlen;
  983. rdp->n_cbs_orphaned += rdp->qlen;
  984. rdp->nxtlist = NULL;
  985. for (i = 0; i < RCU_NEXT_SIZE; i++)
  986. rdp->nxttail[i] = &rdp->nxtlist;
  987. rdp->qlen = 0;
  988. }
  989. /*
  990. * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  991. * and move all callbacks from the outgoing CPU to the current one.
  992. * There can only be one CPU hotplug operation at a time, so no other
  993. * CPU can be attempting to update rcu_cpu_kthread_task.
  994. */
  995. static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  996. {
  997. unsigned long flags;
  998. unsigned long mask;
  999. int need_report = 0;
  1000. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  1001. struct rcu_node *rnp;
  1002. rcu_stop_cpu_kthread(cpu);
  1003. /* Exclude any attempts to start a new grace period. */
  1004. raw_spin_lock_irqsave(&rsp->onofflock, flags);
  1005. /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
  1006. rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
  1007. mask = rdp->grpmask; /* rnp->grplo is constant. */
  1008. do {
  1009. raw_spin_lock(&rnp->lock); /* irqs already disabled. */
  1010. rnp->qsmaskinit &= ~mask;
  1011. if (rnp->qsmaskinit != 0) {
  1012. if (rnp != rdp->mynode)
  1013. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1014. else
  1015. trace_rcu_grace_period(rsp->name,
  1016. rnp->gpnum + 1 -
  1017. !!(rnp->qsmask & mask),
  1018. "cpuofl");
  1019. break;
  1020. }
  1021. if (rnp == rdp->mynode) {
  1022. trace_rcu_grace_period(rsp->name,
  1023. rnp->gpnum + 1 -
  1024. !!(rnp->qsmask & mask),
  1025. "cpuofl");
  1026. need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
  1027. } else
  1028. raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
  1029. mask = rnp->grpmask;
  1030. rnp = rnp->parent;
  1031. } while (rnp != NULL);
  1032. /*
  1033. * We still hold the leaf rcu_node structure lock here, and
  1034. * irqs are still disabled. The reason for this subterfuge is
  1035. * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
  1036. * held leads to deadlock.
  1037. */
  1038. raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
  1039. rnp = rdp->mynode;
  1040. if (need_report & RCU_OFL_TASKS_NORM_GP)
  1041. rcu_report_unblock_qs_rnp(rnp, flags);
  1042. else
  1043. raw_spin_unlock_irqrestore(&rnp->lock, flags);
  1044. if (need_report & RCU_OFL_TASKS_EXP_GP)
  1045. rcu_report_exp_rnp(rsp, rnp);
  1046. rcu_node_kthread_setaffinity(rnp, -1);
  1047. }
  1048. /*
  1049. * Remove the specified CPU from the RCU hierarchy and move any pending
  1050. * callbacks that it might have to the current CPU. This code assumes
  1051. * that at least one CPU in the system will remain running at all times.
  1052. * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
  1053. */
  1054. static void rcu_offline_cpu(int cpu)
  1055. {
  1056. __rcu_offline_cpu(cpu, &rcu_sched_state);
  1057. __rcu_offline_cpu(cpu, &rcu_bh_state);
  1058. rcu_preempt_offline_cpu(cpu);
  1059. }
  1060. #else /* #ifdef CONFIG_HOTPLUG_CPU */
  1061. static void rcu_send_cbs_to_online(struct rcu_state *rsp)
  1062. {
  1063. }
  1064. static void rcu_offline_cpu(int cpu)
  1065. {
  1066. }
  1067. #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
  1068. /*
  1069. * Invoke any RCU callbacks that have made it to the end of their grace
  1070. * period. Throttle as specified by rdp->blimit.
  1071. */
  1072. static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  1073. {
  1074. unsigned long flags;
  1075. struct rcu_head *next, *list, **tail;
  1076. int bl, count;
  1077. /* If no callbacks are ready, just return. */
  1078. if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
  1079. trace_rcu_batch_start(rsp->name, 0, 0);
  1080. trace_rcu_batch_end(rsp->name, 0);
  1081. return;
  1082. }
  1083. /*
  1084. * Extract the list of ready callbacks, disabling to prevent
  1085. * races with call_rcu() from interrupt handlers.
  1086. */
  1087. local_irq_save(flags);
  1088. bl = rdp->blimit;
  1089. trace_rcu_batch_start(rsp->name, rdp->qlen, bl);
  1090. list = rdp->nxtlist;
  1091. rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
  1092. *rdp->nxttail[RCU_DONE_TAIL] = NULL;
  1093. tail = rdp->nxttail[RCU_DONE_TAIL];
  1094. for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
  1095. if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
  1096. rdp->nxttail[count] = &rdp->nxtlist;
  1097. local_irq_restore(flags);
  1098. /* Invoke callbacks. */
  1099. count = 0;
  1100. while (list) {
  1101. next = list->next;
  1102. prefetch(next);
  1103. debug_rcu_head_unqueue(list);
  1104. __rcu_reclaim(rsp->name, list);
  1105. list = next;
  1106. if (++count >= bl)
  1107. break;
  1108. }
  1109. local_irq_save(flags);
  1110. trace_rcu_batch_end(rsp->name, count);
  1111. /* Update count, and requeue any remaining callbacks. */
  1112. rdp->qlen -= count;
  1113. rdp->n_cbs_invoked += count;
  1114. if (list != NULL) {
  1115. *tail = rdp->nxtlist;
  1116. rdp->nxtlist = list;
  1117. for (count = 0; count < RCU_NEXT_SIZE; count++)
  1118. if (&rdp->nxtlist == rdp->nxttail[count])
  1119. rdp->nxttail[count] = tail;
  1120. else
  1121. break;
  1122. }
  1123. /* Reinstate batch limit if we have worked down the excess. */
  1124. if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
  1125. rdp->blimit = blimit;
  1126. /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  1127. if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
  1128. rdp->qlen_last_fqs_check = 0;
  1129. rdp->n_force_qs_snap = rsp->n_force_qs;
  1130. } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  1131. rdp->qlen_last_fqs_check = rdp->qlen;
  1132. local_irq_restore(flags);
  1133. /* Re-invoke RCU core processing if there are callbacks remaining. */
  1134. if (cpu_has_callbacks_ready_to_invoke(rdp))
  1135. invoke_rcu_core();
  1136. }
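/*
 * Illustrative throttling behavior of rcu_do_batch() with the default
 * limits above: a CPU with 25 ready callbacks and blimit == 10 invokes
 * ten of them, then re-raises RCU core processing so the rest are
 * handled on later passes. When the queue grows past qhimark, rdp->blimit
 * is raised to LONG_MAX elsewhere in this file, and the code above
 * reinstates the normal limit once the backlog drops to qlowmark or less.
 */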
  1137. /*
  1138. * Check to see if this CPU is in a non-context-switch quiescent state
  1139. * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
  1140. * Also schedule RCU core processing.
  1141. *
  1142. * This function must be called with hardirqs disabled. It is normally
  1143. * invoked from the scheduling-clock interrupt. If rcu_pending returns
  1144. * false, there is no point in invoking rcu_check_callbacks().
  1145. */
  1146. void rcu_check_callbacks(int cpu, int user)
  1147. {
  1148. trace_rcu_utilization("Start scheduler-tick");
  1149. if (user ||
  1150. (idle_cpu(cpu) && rcu_scheduler_active &&
  1151. !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
  1152. /*
  1153. * Get here if this CPU took its interrupt from user
  1154. * mode or from the idle loop, and if this is not a
  1155. * nested interrupt. In this case, the CPU is in
  1156. * a quiescent state, so note it.
  1157. *
  1158. * No memory barrier is required here because both
  1159. * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  1160. * variables that other CPUs neither access nor modify,
  1161. * at least not while the corresponding CPU is online.
  1162. */
  1163. rcu_sched_qs(cpu);
  1164. rcu_bh_qs(cpu);
  1165. } else if (!in_softirq()) {
  1166. /*
  1167. * Get here if this CPU did not take its interrupt from
  1168. * softirq, in other words, if it is not interrupting
  1169. * a rcu_bh read-side critical section. This is an _bh
  1170. * critical section, so note it.
  1171. */
  1172. rcu_bh_qs(cpu);
  1173. }
  1174. rcu_preempt_check_callbacks(cpu);
  1175. if (rcu_pending(cpu))
  1176. invoke_rcu_core();
  1177. trace_rcu_utilization("End scheduler-tick");
  1178. }

#ifdef CONFIG_SMP

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
 * The caller must have suppressed start of new grace periods.
 */
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
{
	unsigned long bit;
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		mask = 0;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		if (!rcu_gp_in_progress(rsp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		if (rnp->qsmask == 0) {
			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
			continue;
		}
		cpu = rnp->grplo;
		bit = 1;
		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
			if ((rnp->qsmask & bit) != 0 &&
			    f(per_cpu_ptr(rsp->rda, cpu)))
				mask |= bit;
		}
		if (mask != 0) {

			/* rcu_report_qs_rnp() releases rnp->lock. */
			rcu_report_qs_rnp(mask, rsp, rnp, flags);
			continue;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
	rnp = rcu_get_root(rsp);
	if (rnp->qsmask == 0) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	trace_rcu_utilization("Start fqs");
	if (!rcu_gp_in_progress(rsp)) {
		trace_rcu_utilization("End fqs");
		return;  /* No grace period in progress, nothing to force. */
	}
	if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
		trace_rcu_utilization("End fqs");
		return;	/* Someone else is already on the job. */
	}
	if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
		goto unlock_fqs_ret; /* no emergency and done recently. */
	rsp->n_force_qs++;
	raw_spin_lock(&rnp->lock);  /* irqs already disabled */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	if (!rcu_gp_in_progress(rsp)) {
		rsp->n_force_qs_ngp++;
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
		goto unlock_fqs_ret;  /* no GP in progress, time updated. */
	}
	rsp->fqs_active = 1;
	switch (rsp->signaled) {
	case RCU_GP_IDLE:
	case RCU_GP_INIT:

		break; /* grace period idle or initializing, ignore. */

	case RCU_SAVE_DYNTICK:
		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
			break; /* So gcc recognizes the dead code. */

		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */

		/* Record dyntick-idle state. */
		force_qs_rnp(rsp, dyntick_save_progress_counter);
		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
		if (rcu_gp_in_progress(rsp))
			rsp->signaled = RCU_FORCE_QS;
		break;

	case RCU_FORCE_QS:

		/* Check dyntick-idle state, send IPI to laggarts. */
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);

		/* Leave state in case more forcing is required. */

		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
		break;
	}
	rsp->fqs_active = 0;
	if (rsp->fqs_need_gp) {
		raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
		rsp->fqs_need_gp = 0;
		rcu_start_gp(rsp, flags); /* releases rnp->lock */
		trace_rcu_utilization("End fqs");
		return;
	}
	raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
unlock_fqs_ret:
	raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
	trace_rcu_utilization("End fqs");
}

#else /* #ifdef CONFIG_SMP */

static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	set_need_resched();
}

#endif /* #else #ifdef CONFIG_SMP */

/*
 * This does the RCU core processing work for the specified rcu_state
 * and rcu_data structures.  This may be called only from the CPU to
 * whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;

	WARN_ON_ONCE(rdp->beenonline == 0);

	/*
	 * If an RCU GP has gone long enough, go check for dyntick
	 * idle CPUs and, if needed, send resched IPIs.
	 */
	if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
		force_quiescent_state(rsp, 1);

	/*
	 * Advance callbacks in response to end of earlier grace
	 * period that some other CPU ended.
	 */
	rcu_process_gp_end(rsp, rdp);

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

	/* Does this CPU require a not-yet-started grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
		rcu_start_gp(rsp, flags);  /* releases above lock */
	}

	/* If there are callbacks ready, invoke them. */
	if (cpu_has_callbacks_ready_to_invoke(rdp))
		invoke_rcu_callbacks(rsp, rdp);
}

/*
 * Do RCU core processing for the current CPU.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	trace_rcu_utilization("Start RCU core");
	__rcu_process_callbacks(&rcu_sched_state,
				&__get_cpu_var(rcu_sched_data));
	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_process_callbacks();

	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
	rcu_needs_cpu_flush();
	trace_rcu_utilization("End RCU core");
}

/*
 * Schedule RCU callback invocation.  If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread.  Note that because we
 * are running on the current CPU with interrupts disabled, the
 * rcu_cpu_kthread_task cannot disappear out from under us.
 */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
		return;
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);
		return;
	}
	invoke_rcu_callbacks_kthread();
}

static void invoke_rcu_core(void)
{
	raise_softirq(RCU_SOFTIRQ);
}

static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
	   struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	smp_mb(); /* Ensure RCU update seen before callback registry. */

	/*
	 * Opportunistically note grace-period endings and beginnings.
	 * Note that we might see a beginning right after we see an
	 * end, but never vice versa, since this CPU has to pass through
	 * a quiescent state betweentimes.
	 */
	local_irq_save(flags);
	rdp = this_cpu_ptr(rsp->rda);

	/* Add the callback to our list. */
	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
	rdp->qlen++;

	if (__is_kfree_rcu_offset((unsigned long)func))
		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
					 rdp->qlen);
	else
		trace_rcu_callback(rsp->name, head, rdp->qlen);

	/* If interrupts were disabled, don't dive into RCU core. */
	if (irqs_disabled_flags(flags)) {
		local_irq_restore(flags);
		return;
	}

	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {

		/* Are we ignoring a completed grace period? */
		rcu_process_gp_end(rsp, rdp);
		check_for_new_grace_period(rsp, rdp);

		/* Start a new grace period if one not already started. */
		if (!rcu_gp_in_progress(rsp)) {
			unsigned long nestflag;
			struct rcu_node *rnp_root = rcu_get_root(rsp);

			raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
			rcu_start_gp(rsp, nestflag);  /* rlses rnp_root->lock */
		} else {
			/* Give the grace period a kick. */
			rdp->blimit = LONG_MAX;
			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
			    *rdp->nxttail[RCU_DONE_TAIL] != head)
				force_quiescent_state(rsp, 0);
			rdp->n_force_qs_snap = rsp->n_force_qs;
			rdp->qlen_last_fqs_check = rdp->qlen;
		}
	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
		force_quiescent_state(rsp, 1);
	local_irq_restore(flags);
}

/*
 * Queue an RCU-sched callback for invocation after a grace period.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
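
/*
 * Illustrative sketch (not part of this file): a typical call_rcu_sched()
 * user embeds a struct rcu_head in its own structure and passes a callback
 * that frees the enclosing object once a grace period has elapsed.  The
 * struct foo and foo_reclaim() names below are hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_rcu_sched(&old_fp->rcu, foo_reclaim);
 */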

/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_state);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
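
/*
 * Illustrative sketch (not part of this file): readers matching the
 * call_rcu_bh() grace period above bracket their accesses with
 * rcu_read_lock_bh()/rcu_read_unlock_bh().  The gbl_foo and
 * do_something_with() names below are hypothetical.
 *
 *	rcu_read_lock_bh();
 *	fp = rcu_dereference_bh(gbl_foo);
 *	if (fp)
 *		do_something_with(fp->data);
 *	rcu_read_unlock_bh();
 */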

/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	if (rcu_blocking_is_gp())
		return;
	wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
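
/*
 * Illustrative sketch (not part of this file): a synchronous updater
 * publishes a new version of a structure and waits for an rcu-sched
 * grace period before freeing the old version, so that no reader still
 * running under preempt_disable()/rcu_read_lock_sched() can hold a
 * reference to it.  The gbl_foo name below is hypothetical.
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	*new_fp = *old_fp;
 *	new_fp->data = newval;
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	synchronize_sched();
 *	kfree(old_fp);
 */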

/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */
void synchronize_rcu_bh(void)
{
	if (rcu_blocking_is_gp())
		return;
	wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rdp->qs_pending && !rdp->passed_quiesce) {

		/*
		 * If force_quiescent_state() coming soon and this CPU
		 * needs a quiescent state, and this is either RCU-sched
		 * or RCU-bh, force a local reschedule.
		 */
		rdp->n_rp_qs_pending++;
		if (!rdp->preemptible &&
		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
				 jiffies))
			set_need_resched();
	} else if (rdp->qs_pending && rdp->passed_quiesce) {
		rdp->n_rp_report_qs++;
		return 1;
	}

	/* Does this CPU have callbacks ready to invoke? */
	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
		rdp->n_rp_cb_ready++;
		return 1;
	}

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
		return 1;
	}

	/* Has another RCU grace period completed? */
	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
		rdp->n_rp_gp_completed++;
		return 1;
	}

	/* Has a new RCU grace period started? */
	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
		rdp->n_rp_gp_started++;
		return 1;
	}

	/* Has an RCU GP gone long enough to send resched IPIs &c? */
	if (rcu_gp_in_progress(rsp) &&
	    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
		rdp->n_rp_need_fqs++;
		return 1;
	}

	/* nothing to do */
	rdp->n_rp_need_nothing++;
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
static int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_pending(cpu);
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.
 */
static int rcu_needs_cpu_quick_check(int cpu)
{
	/* RCU callbacks either ready or pending? */
	return per_cpu(rcu_sched_data, cpu).nxtlist ||
	       per_cpu(rcu_bh_data, cpu).nxtlist ||
	       rcu_preempt_needs_cpu(cpu);
}

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
	void (*call_rcu_func)(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu_func = type;
	call_rcu_func(head, rcu_barrier_callback);
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(struct rcu_state *rsp,
			 void (*call_rcu_func)(struct rcu_head *head,
					       void (*func)(struct rcu_head *head)))
{
	BUG_ON(in_interrupt());
	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	/*
	 * Initialize rcu_barrier_cpu_count to 1, then invoke
	 * rcu_barrier_func() on each CPU, so that each CPU also has
	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
	 * might complete its grace period before all of the other CPUs
	 * did their increment, causing this function to return too
	 * early.  Note that on_each_cpu() disables irqs, which prevents
	 * any CPUs from coming online or going offline until each online
	 * CPU has queued its RCU-barrier callback.
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
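
/*
 * Illustrative sketch (not part of this file): a module whose callbacks
 * were queued with call_rcu_sched() must wait for all of them to run
 * before its text and data go away, typically from its exit handler,
 * after first making sure no new callbacks can be queued.  The
 * my_module_exit(), remove_my_sources_of_new_callbacks() and
 * free_my_remaining_data() names below are hypothetical.
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		remove_my_sources_of_new_callbacks();
 *		rcu_barrier_sched();
 *		free_my_remaining_data();
 *	}
 */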

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	int i;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rdp->qlen = 0;
#ifdef CONFIG_NO_HZ
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
#endif /* #ifdef CONFIG_NO_HZ */
	rdp->cpu = cpu;
	rdp->rsp = rsp;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->passed_quiesce = 0;  /* We could be racing with new GP, */
	rdp->qs_pending = 1;	  /* so set up to respond to current GP. */
	rdp->beenonline = 1;	  /* We have now been online. */
	rdp->preemptible = preemptible;
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */

	/*
	 * A new grace period might start here.  If so, we won't be part
	 * of it, but that is OK, as we are currently in a quiescent state.
	 */

	/* Exclude any attempts to start a new GP on large systems. */
	raw_spin_lock(&rsp->onofflock);		/* irqs already disabled. */

	/* Add CPU to rcu_node bitmasks. */
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	do {
		/* Exclude any attempts to start a new GP on small systems. */
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rnp->qsmaskinit |= mask;
		mask = rnp->grpmask;
		if (rnp == rdp->mynode) {
			rdp->gpnum = rnp->completed; /* if GP in progress... */
			rdp->completed = rnp->completed;
			rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
			trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
		}
		raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
		rnp = rnp->parent;
	} while (rnp != NULL && !(rnp->qsmaskinit & mask));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}

static void __cpuinit rcu_prepare_cpu(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
	rcu_preempt_init_percpu_data(cpu);
}

/*
 * Handle CPU online/offline notification events.
 */
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	trace_rcu_utilization("Start CPU hotplug");
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_prepare_cpu(cpu);
		rcu_prepare_kthreads(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		rcu_node_kthread_setaffinity(rnp, -1);
		rcu_cpu_kthread_setrt(cpu, 1);
		break;
	case CPU_DOWN_PREPARE:
		rcu_node_kthread_setaffinity(rnp, cpu);
		rcu_cpu_kthread_setrt(cpu, 0);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * The whole machine is "stopped" except this CPU, so we can
		 * touch any data without introducing corruption.  We send the
		 * dying CPU's callbacks to an arbitrarily chosen online CPU.
		 */
		rcu_send_cbs_to_online(&rcu_bh_state);
		rcu_send_cbs_to_online(&rcu_sched_state);
		rcu_preempt_send_cbs_to_online();
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	trace_rcu_utilization("End CPU hotplug");
	return NOTIFY_OK;
}

/*
 * This function is invoked towards the end of the scheduler's initialization
 * process.  Before this is called, the idle task might contain
 * RCU read-side critical sections (during which time, this idle
 * task is booting the system).  After this function is called, the
 * idle tasks are prohibited from containing RCU read-side critical
 * sections.  This function also enables RCU lockdep checking.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
 */
#ifdef CONFIG_RCU_FANOUT_EXACT
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int i;

	for (i = NUM_RCU_LVLS - 1; i > 0; i--)
		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
	rsp->levelspread[0] = RCU_FANOUT_LEAF;
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int ccur;
	int cprv;
	int i;

	cprv = NR_CPUS;
	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		ccur = rsp->levelcnt[i];
		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}
}
#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
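
/*
 * Worked example (illustrative only, assuming NR_CPUS=64 and a two-level
 * tree with levelcnt[] = { 1, 4 }): the balanced variant above walks from
 * the leaves up, rounding up at each level, so
 *
 *	i = 1: levelspread[1] = (64 + 4 - 1) / 4 = 16	(CPUs per leaf)
 *	i = 0: levelspread[0] = (4 + 1 - 1) / 1 = 4	(leaves under the root)
 *
 * i.e. each leaf rcu_node covers 16 CPUs and the root fans out to the four
 * leaves, which is what rcu_init_one() below relies on when it computes
 * cpustride and each node's grplo/grphi range.
 */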

/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
static void __init rcu_init_one(struct rcu_state *rsp,
		struct rcu_data __percpu *rda)
{
	static char *buf[] = { "rcu_node_level_0",
			       "rcu_node_level_1",
			       "rcu_node_level_2",
			       "rcu_node_level_3" };  /* Match MAX_RCU_LVLS */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < NUM_RCU_LVLS; i++)
		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
	rcu_init_levelspread(rsp);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		cpustride *= rsp->levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
			raw_spin_lock_init(&rnp->lock);
			lockdep_set_class_and_name(&rnp->lock,
						   &rcu_node_class[i], buf[i]);
			rnp->gpnum = 0;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= NR_CPUS)
				rnp->grphi = NR_CPUS - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % rsp->levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / rsp->levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
		}
	}

	rsp->rda = rda;
	rnp = rsp->level[NUM_RCU_LVLS - 1];
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i, rsp);
	}
}

void __init rcu_init(void)
{
	int cpu;

	rcu_bootup_announce();
	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	cpu_notifier(rcu_cpu_notify, 0);
	for_each_online_cpu(cpu)
		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
	check_cpu_stall_init();
}

#include "rcutree_plugin.h"