rcutree_plugin.h

/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}
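
/*
 * Editor's sketch (illustrative, not part of the algorithm): how a
 * preempted reader ends up in rcu_read_unlock_special(), based on the
 * comments above and in rcu_preempt_note_context_switch():
 *
 *	rcu_read_lock();	// nesting goes to 1
 *	...			// preempted here: the scheduler invokes
 *				// rcu_preempt_note_context_switch(), which
 *				// queues t on rnp->blkd_tasks and sets
 *				// RCU_READ_UNLOCK_BLOCKED
 *	rcu_read_unlock();	// nesting drops to 0; because
 *				// ->rcu_read_unlock_special is nonzero,
 *				// __rcu_read_unlock() calls
 *				// rcu_read_unlock_special(t) to dequeue t
 *				// and report any quiescent state
 */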

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
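
/*
 * Editor's usage sketch (illustrative, not part of this file): a typical
 * call_rcu() caller embeds a struct rcu_head in its own structure and
 * frees the structure from the callback once a grace period has elapsed.
 * "struct foo" and foo_reclaim() are hypothetical names:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	// Updater: unlink, then defer the free past all current readers.
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_reclaim);
 */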

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
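
/*
 * Editor's note (illustrative): callers do not invoke kfree_call_rcu()
 * directly; they use the kfree_rcu() macro, which encodes the rcu_head
 * offset and reaches this function via __kfree_rcu().  Assuming a
 * hypothetical struct foo with a struct rcu_head member named "rcu":
 *
 *	list_del_rcu(&p->list);
 *	kfree_rcu(p, rcu);	// free p after a grace period, no
 *				// separate callback function needed
 */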

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
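
/*
 * Editor's usage sketch (illustrative): synchronize_rcu() is the blocking
 * counterpart of call_rcu().  An updater can unlink an element and wait
 * synchronously for all pre-existing readers before freeing it (foo_lock
 * is a hypothetical update-side lock):
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);	// current readers may still see p
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();	// wait for those readers to finish
 *	kfree(p);		// no reader can still hold a reference
 */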

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * and is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
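
/*
 * Editor's sketch of the batching advice above (illustrative;
 * update_element() is a hypothetical per-element updater).  Instead of
 * expediting one grace period per update:
 *
 *	for (i = 0; i < n; i++) {
 *		update_element(i);
 *		synchronize_rcu_expedited();	// expensive, disturbs all CPUs
 *	}
 *
 * perform all updates first and wait once:
 *
 *	for (i = 0; i < n; i++)
 *		update_element(i);
 *	synchronize_rcu();	// one grace period covers the whole batch
 */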

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
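
/*
 * Editor's summary of the proxy-lock trick above (illustrative timeline,
 * derived from the comments in rcu_boost() and rcu_read_unlock_special()):
 *
 *	booster kthread				preempted reader t
 *	---------------				------------------
 *	rt_mutex_init_proxy_locked(&mtx, t);	// mtx appears held by t
 *	t->rcu_boost_mutex = &mtx;
 *	rt_mutex_lock(&mtx);	// blocks; priority inheritance
 *				// raises t's priority
 *						t runs, reaches its outermost
 *						rcu_read_unlock();
 *						rcu_read_unlock_special()
 *						does rt_mutex_unlock(&mtx),
 *						deboosting t
 *	rt_mutex_lock() returns
 *	rt_mutex_unlock(&mtx);	// keep lockdep happy
 */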

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
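
/*
 * Editor's worked example (illustrative): CONFIG_RCU_BOOST_DELAY is a
 * delay in milliseconds, so the macro above converts it to jiffies.
 * Assuming CONFIG_RCU_BOOST_DELAY=500 and HZ=250, this yields
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, i.e. half a second before
 * boosting is considered for a given grace period.
 */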

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop RCU's per-CPU kthread when its CPU goes offline.
 */
  1165. static void rcu_stop_cpu_kthread(int cpu)
  1166. {
  1167. struct task_struct *t;
  1168. /* Stop the CPU's kthread. */
  1169. t = per_cpu(rcu_cpu_kthread_task, cpu);
  1170. if (t != NULL) {
  1171. per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
  1172. kthread_stop(t);
  1173. }
  1174. }
  1175. #endif /* #ifdef CONFIG_HOTPLUG_CPU */
  1176. static void rcu_kthread_do_work(void)
  1177. {
  1178. rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
  1179. rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
  1180. rcu_preempt_do_callbacks();
  1181. }
  1182. /*
  1183. * Wake up the specified per-rcu_node-structure kthread.
  1184. * Because the per-rcu_node kthreads are immortal, we don't need
  1185. * to do anything to keep them alive.
  1186. */
  1187. static void invoke_rcu_node_kthread(struct rcu_node *rnp)
  1188. {
  1189. struct task_struct *t;
  1190. t = rnp->node_kthread_task;
  1191. if (t != NULL)
  1192. wake_up_process(t);
  1193. }
  1194. /*
  1195. * Set the specified CPU's kthread to run RT or not, as specified by
  1196. * the to_rt argument. The CPU-hotplug locks are held, so the task
  1197. * is not going away.
  1198. */
  1199. static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
  1200. {
  1201. int policy;
  1202. struct sched_param sp;
  1203. struct task_struct *t;
  1204. t = per_cpu(rcu_cpu_kthread_task, cpu);
  1205. if (t == NULL)
  1206. return;
  1207. if (to_rt) {
  1208. policy = SCHED_FIFO;
  1209. sp.sched_priority = RCU_KTHREAD_PRIO;
  1210. } else {
  1211. policy = SCHED_NORMAL;
  1212. sp.sched_priority = 0;
  1213. }
  1214. sched_setscheduler_nocheck(t, policy, &sp);
  1215. }
  1216. /*
  1217. * Timer handler to initiate the waking up of per-CPU kthreads that
  1218. * have yielded the CPU due to excess numbers of RCU callbacks.
  1219. * We wake up the per-rcu_node kthread, which in turn will wake up
  1220. * the booster kthread.
  1221. */
  1222. static void rcu_cpu_kthread_timer(unsigned long arg)
  1223. {
  1224. struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
  1225. struct rcu_node *rnp = rdp->mynode;
  1226. atomic_or(rdp->grpmask, &rnp->wakemask);
  1227. invoke_rcu_node_kthread(rnp);
  1228. }
/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;
	int prio = current->rt_priority;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	set_user_nice(current, 0);
	sp.sched_priority = prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}
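
/*
 * Illustrative sketch only, never compiled: this mirrors the one call
 * site in rcu_cpu_kthread() below, where a kthread that has found work
 * pending for more than ten consecutive passes posts its wakeup timer
 * and yields.  The timer handler (rcu_cpu_kthread_timer() above)
 * restores the wakeup path if the yielding kthread stays preempted.
 */
#if 0
	if (spincnt > 10)
		rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
#endif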
/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}
/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread, because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
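
/*
 * Worked example (illustrative values, not from any particular
 * configuration): suppose rnp->grplo == 0, rnp->grphi == 15,
 * rnp->qsmaskinit == 0x5 (CPUs 0 and 2 initialized), and
 * outgoingcpu == 2.  The first loop above sets only CPU 0 in cm, so
 * the kthreads end up affined to CPU 0 alone.  Had outgoingcpu also
 * been 0, cm would have come up empty, and the fallback branch would
 * instead have permitted the kthreads to run on any CPU outside this
 * rcu_node's grplo..grphi range.
 */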
/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}
#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following four preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	few times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
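
/*
 * Worked example of the wakeup arithmetic (illustrative numbers only):
 * with RCU_IDLE_GP_DELAY == 4, a CPU going idle at jiffies == 1001
 * with non-lazy callbacks pending computes its wakeup in
 * rcu_needs_cpu() below as round_up(1001 + 4, 4) == 1008, so
 * *delta_jiffies == 7.  The result always lands between
 * RCU_IDLE_GP_DELAY and 2 * RCU_IDLE_GP_DELAY - 1 jiffies out;
 * rounding to a multiple of RCU_IDLE_GP_DELAY aligns the wakeups of
 * multiple idle CPUs so that they can share grace-period processing.
 */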
extern int tick_nohz_enabled;

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}
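
/*
 * For example (illustrative counts): a CPU with ->qlen == 12 and
 * ->qlen_lazy == 12 has only lazy callbacks (such as those posted by
 * kfree_rcu()) queued, so the function above returns false.  If even
 * one of the twelve were non-lazy, the two counts would differ and it
 * would return true.
 */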
#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return false;
}

#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}
/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 *
 * The delta_jiffies argument is used to store the time when RCU is
 * going to need the CPU again if it still has callbacks.  The reason
 * for this is that rcu_prepare_for_idle() might need to post a timer,
 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
 * the wakeup time for this CPU.  This means that RCU's timer can be
 * delayed until the wakeup time, which defeats the purpose of posting
 * a timer.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Flag a new idle sojourn to the idle-entry state machine. */
	rdtp->idle_first_pass = 1;
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu)) {
		*delta_jiffies = ULONG_MAX;
		return 0;
	}
	if (rdtp->dyntick_holdoff == jiffies) {
		/* RCU recently tried and failed, so don't try again. */
		*delta_jiffies = 1;
		return 1;
	}
	/* Set up for the possibility that RCU will post a timer. */
	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
					  RCU_IDLE_GP_DELAY) - jiffies;
	} else {
		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
	}
	return 0;
}
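
/*
 * Illustrative sketch only, never compiled: roughly how an idle-entry
 * path might consult rcu_needs_cpu() before stopping the tick.  The
 * helpers named here are hypothetical placeholders, not functions in
 * this kernel.
 */
#if 0
	unsigned long dj;

	if (!rcu_needs_cpu(cpu, &dj))
		stop_tick_for(dj);	/* Hypothetical: sleep up to dj jiffies. */
	else
		keep_tick_running();	/* Hypothetical: RCU needs this CPU now. */
#endif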
/*
 * Handler for smp_call_function_single().  The only point of this
 * handler is to wake the CPU up, so the handler does only tracing.
 */
void rcu_idle_demigrate(void *unused)
{
	trace_rcu_prep_idle("Demigrate");
}

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 *
 * One special case: the timer gets migrated without awakening the CPU
 * on which the timer was scheduled.  In this case, we must wake up
 * that CPU.  We do so with smp_call_function_single().
 */
static void rcu_idle_gp_timer_func(unsigned long cpu_in)
{
	int cpu = (int)cpu_in;

	trace_rcu_prep_idle("Timer");
	if (cpu != smp_processor_id())
		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
	else
		WARN_ON_ONCE(1); /* Getting here can hang the system... */
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	rdtp->dyntick_holdoff = jiffies - 1;
	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
	rdtp->idle_gp_timer_expires = jiffies - 1;
	rdtp->idle_first_pass = 1;
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to ->idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	del_timer(&rdtp->idle_gp_timer);
	trace_rcu_prep_idle("Cleanup after idle");
	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
}
/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The ->dyntick_drain field controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	struct timer_list *tp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_enabled);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If this is an idle re-entry, for example, due to use of
	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
	 * loop, then don't take any state-machine actions, unless the
	 * momentary exit from idle queued additional non-lazy callbacks.
	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
	 * pending.
	 */
	if (!rdtp->idle_first_pass &&
	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
		if (rcu_cpu_has_callbacks(cpu)) {
			tp = &rdtp->idle_gp_timer;
			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		}
		return;
	}
	rdtp->idle_first_pass = 0;
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		rdtp->dyntick_drain = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (rdtp->dyntick_holdoff == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the ->dyntick_drain sequencing. */
	if (rdtp->dyntick_drain <= 0) {
		/* First time through, initialize the counter. */
		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		rdtp->dyntick_drain = 0;
		rdtp->dyntick_holdoff = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("Dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("Dyntick with lazy callbacks");
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		return; /* Nothing more to do immediately. */
	} else if (--(rdtp->dyntick_drain) <= 0) {
		/* We have hit the limit, so time to give up. */
		rdtp->dyntick_holdoff = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		trace_rcu_prep_idle("Callbacks drained");
	}
}
/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}
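
/*
 * Illustrative scenario (made-up counts): rcu_prepare_for_idle() above
 * snapshots ->nonlazy_posted into ->nonlazy_posted_snap, say at 5.  If
 * a momentary exit from idle (for example, via RCU_NONIDLE()) posts a
 * non-lazy callback, the counter becomes 6, the snapshot comparison
 * fails on idle re-entry, and the state machine re-runs instead of
 * merely reposting ->idle_gp_timer.  Because the counter never
 * decreases, invoking callbacks cannot mask a new posting.
 */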
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct timer_list *tltp = &rdtp->idle_gp_timer;

	sprintf(cp, "drain=%d %c timer=%lu",
		rdtp->dyntick_drain,
		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
		timer_pending(tltp) ? tltp->expires - jiffies : -1);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
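
/*
 * Example of the resulting console line (illustrative values only):
 *
 *	3: (45 ticks this GP) idle=4f2/1/0 drain=0 . timer=4
 *
 * Here CPU 3 has taken 45 scheduling-clock interrupts during the
 * current grace period, the low-order ->dynticks bits are 0x4f2, the
 * nesting values are 1 and 0, and the CONFIG_RCU_FAST_NO_HZ state
 * shows no drain in progress, no holdoff ('.'), and a wakeup timer
 * due in four jiffies.
 */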
/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */