/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/delay.h>
#include <linux/stop_machine.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers of this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}
/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
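
/*
 * Illustrative sketch (not part of this file): a typical reader enters
 * and exits its critical section via the rcu_read_lock()/rcu_read_unlock()
 * wrappers, which under CONFIG_TREE_PREEMPT_RCU come down to the
 * __rcu_read_lock()/__rcu_read_unlock() implementations in this file.
 * The "gp" pointer and do_something_with() below are hypothetical:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);	// fetch RCU-protected pointer
 *	if (p != NULL)
 *		do_something_with(p->a);	// safe until the unlock
 *	rcu_read_unlock();
 */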
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_qs(smp_processor_id());

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else
			raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}
/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1)
		--t->rcu_read_lock_nesting;
	else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
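
/*
 * Note on the INT_MIN dance above: while rcu_read_unlock_special() runs,
 * ->rcu_read_lock_nesting is parked at INT_MIN so that a preemption in
 * that window is recognizable (nesting < 0) by
 * rcu_preempt_note_context_switch(), which then finishes the cleanup on
 * this task's behalf.  The barrier() calls order the nesting-counter
 * stores against the critical section and the ->rcu_read_unlock_special
 * load; the intent is that compiler-only ordering suffices here because
 * the fields involved are manipulated only by the task itself (or on
 * its behalf with irqs disabled).
 */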
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}
/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns non-zero if tasks were blocking the current (normal or
 * expedited) grace period on the specified rcu_node structure, and
 * zero otherwise.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

#ifdef CONFIG_RCU_BOOST
	/* In case root is being boosted and leaf is not. */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
	return retval;
}
/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
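
/*
 * In the above: a task running outside of any RCU read-side critical
 * section (nesting of zero) lets us record the quiescent state right
 * away.  A task inside a critical section instead gets
 * RCU_READ_UNLOCK_NEED_QS set, which rcu_read_unlock_special() turns
 * into a call to rcu_preempt_qs() at the outermost rcu_read_unlock().
 */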
/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
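
/*
 * Illustrative sketch (not part of this file): the usual call_rcu()
 * pattern embeds an rcu_head in the protected structure and recovers
 * it in the callback with container_of().  "struct foo" and
 * foo_reclaim() are hypothetical:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	// After unlinking fp from all reader-visible structures:
 *	call_rcu(&fp->rcu, foo_reclaim);
 */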
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
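
/*
 * Illustrative sketch (not part of this file): the classic updater
 * sequence that synchronize_rcu() enables -- unlink, wait for readers,
 * then free.  "gp" and "new" are hypothetical:
 *
 *	struct foo *old = gp;
 *
 *	rcu_assign_pointer(gp, new);	// publish the replacement
 *	synchronize_rcu();		// wait out pre-existing readers
 *	kfree(old);			// now safe: no reader can hold "old"
 */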
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}
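
/*
 * Worked example of the loop above, for a two-level tree: when the last
 * blocked task on leaf L exits its critical section, L is "done", so the
 * walk clears L's bit (L->grpmask) in the root's ->expmask.  If that was
 * the root's last outstanding bit and the root has no ->exp_tasks, the
 * root is done too and the waiter on sync_rcu_preempt_exp_wq is
 * awakened; otherwise the walk stops at the first not-yet-done ancestor.
 */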
/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks))
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
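
/*
 * Summary of the expedited algorithm above:
 *
 *	1. Snapshot sync_rcu_preempt_exp_count, so that a concurrently
 *	   completing expedited grace period can satisfy this request.
 *	2. Acquire sync_rcu_preempt_exp_mutex, spinning briefly and
 *	   falling back to (non-expedited) synchronize_rcu() under
 *	   heavy contention.
 *	3. Use synchronize_sched_expedited() to force all preempted
 *	   readers onto their leaves' ->blkd_tasks lists.
 *	4. Initialize each non-leaf ->expmask and snapshot each node's
 *	   blocked-task list via its ->exp_tasks pointer.
 *	5. Wait until sync_rcu_preempt_exp_done() holds at the root,
 *	   then bump the counter for step 1's benefit to others.
 */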
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
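
/*
 * Illustrative sketch (not part of this file): rcu_barrier() waits for
 * already-queued callbacks to be *invoked*, which synchronize_rcu() does
 * not guarantee.  The canonical use is module unload, so that no callback
 * queued by the module can run after its code is freed.  "my_exit" and
 * the cleanup it performs are hypothetical:
 *
 *	static void __exit my_exit(void)
 *	{
 *		...			// stop queueing new callbacks
 *		rcu_barrier();		// wait for outstanding callbacks
 *		...			// now safe to free module state
 *	}
 */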
/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static struct lock_class_key rcu_boost_class;

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
				   "rcu_boost_mutex");
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
}
/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
	invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			trace_rcu_utilization("End boost kthread@rcu_yield");
			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
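
/*
 * Note the anti-hogging policy in the loop above: spincnt counts
 * consecutive rcu_boost() calls that left more tasks to boost.  After
 * more than ten such passes the kthread drops out of real-time priority
 * via rcu_yield(), with rcu_boost_kthread_timer() posted so that the
 * kthread is awakened again should it remain preempted too long.
 */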
/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t != NULL)
			wake_up_process(t);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task))
		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
	local_irq_restore(flags);
}
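
/*
 * In the above, irqs are disabled around the ->rcu_cpu_has_work update
 * and the wakeup so that both consistently apply to the same CPU's
 * kthread, and the "current != kthread" check skips the pointless
 * wakeup when the kthread itself is the one asking for more work.
 */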
/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm)
{
	struct task_struct *t;

	t = rnp->boost_kthread_task;
	if (t != NULL)
		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
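
/*
 * Worked example of the conversion above: CONFIG_RCU_BOOST_DELAY is in
 * milliseconds, so with CONFIG_RCU_BOOST_DELAY=500 and HZ=250 this is
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, i.e. half a second.
 */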
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index)
{
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;
	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t);  /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
	struct task_struct *t;

	/* Stop the CPU's kthread. */
	t = per_cpu(rcu_cpu_kthread_task, cpu);
	if (t != NULL) {
		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
		kthread_stop(t);
	}
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
  1257. static void rcu_kthread_do_work(void)
  1258. {
  1259. rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
  1260. rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
  1261. rcu_preempt_do_callbacks();
  1262. }
  1263. /*
  1264. * Wake up the specified per-rcu_node-structure kthread.
  1265. * Because the per-rcu_node kthreads are immortal, we don't need
  1266. * to do anything to keep them alive.
  1267. */
  1268. static void invoke_rcu_node_kthread(struct rcu_node *rnp)
  1269. {
  1270. struct task_struct *t;
  1271. t = rnp->node_kthread_task;
  1272. if (t != NULL)
  1273. wake_up_process(t);
  1274. }
  1275. /*
  1276. * Set the specified CPU's kthread to run RT or not, as specified by
  1277. * the to_rt argument. The CPU-hotplug locks are held, so the task
  1278. * is not going away.
  1279. */
  1280. static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
  1281. {
  1282. int policy;
  1283. struct sched_param sp;
  1284. struct task_struct *t;
  1285. t = per_cpu(rcu_cpu_kthread_task, cpu);
  1286. if (t == NULL)
  1287. return;
  1288. if (to_rt) {
  1289. policy = SCHED_FIFO;
  1290. sp.sched_priority = RCU_KTHREAD_PRIO;
  1291. } else {
  1292. policy = SCHED_NORMAL;
  1293. sp.sched_priority = 0;
  1294. }
  1295. sched_setscheduler_nocheck(t, policy, &sp);
  1296. }
/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
	struct rcu_node *rnp = rdp->mynode;

	atomic_or(rdp->grpmask, &rnp->wakemask);
	invoke_rcu_node_kthread(rnp);
}
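/*
 * Editor's worked example (not in the original file), assuming grpmask
 * holds the CPU's bit position within its leaf node: for a leaf rcu_node
 * covering CPUs 16-31 (grplo == 16), a timer firing for CPU 21 has
 * rdp->grpmask == 1UL << (21 - 16) == 0x20, so the atomic_or() above
 * sets bit 5 of rnp->wakemask, which the scan in rcu_node_kthread()
 * below translates back into CPU 21.
 */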
/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted. Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
	struct sched_param sp;
	struct timer_list yield_timer;
	int prio = current->rt_priority;

	setup_timer_on_stack(&yield_timer, f, arg);
	mod_timer(&yield_timer, jiffies + 2);
	sp.sched_priority = 0;
	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
	set_user_nice(current, 19);
	schedule();
	set_user_nice(current, 0);
	sp.sched_priority = prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	del_timer(&yield_timer);
}
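/*
 * Editor's sketch (not in the original file): the on-stack timer pattern
 * used by rcu_yield(), reduced to its essentials. The names my_handler()
 * and example_yield() are hypothetical.
 */
#if 0	/* illustrative only */
static void my_handler(unsigned long arg)
{
	/* Runs from the timer softirq if we are still asleep 2 ticks on. */
}

static void example_yield(void)
{
	struct timer_list t;

	setup_timer_on_stack(&t, my_handler, 0UL);
	mod_timer(&t, jiffies + 2);	/* arm the safety net */
	schedule();			/* give up the CPU */
	del_timer(&t);			/* cancel it if it has not fired */
	destroy_timer_on_stack(&t);	/* pairs with setup_timer_on_stack() */
}
#endif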
/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline. We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh. This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
	while (cpu_is_offline(cpu) ||
	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
	       smp_processor_id() != cpu) {
		if (kthread_should_stop())
			return 1;
		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
		local_bh_enable();
		schedule_timeout_uninterruptible(1);
		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
		local_bh_disable();
	}
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	return 0;
}
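/*
 * Editor's worked scenario (not in the original file): the kthread for
 * CPU 3 is created while CPU 3 is still coming up, so it first runs on,
 * say, CPU 0. The loop above then marks it RCU_KTHREAD_OFFCPU, sleeps a
 * tick at a time, and keeps re-asserting the single-CPU affinity mask
 * until CPU 3 is online and the scheduler has actually migrated the
 * kthread there (smp_processor_id() == 3), at which point callback
 * processing can begin.
 */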
/*
 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
	int cpu = (int)(long)arg;
	unsigned long flags;
	int spincnt = 0;
	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
	char work;
	char *workp = &per_cpu(rcu_cpu_has_work, cpu);

	trace_rcu_utilization("Start CPU kthread@init");
	for (;;) {
		*statusp = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End CPU kthread@rcu_wait");
		rcu_wait(*workp != 0 || kthread_should_stop());
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		if (rcu_cpu_kthread_should_stop(cpu)) {
			local_bh_enable();
			break;
		}
		*statusp = RCU_KTHREAD_RUNNING;
		per_cpu(rcu_cpu_kthread_loops, cpu)++;
		local_irq_save(flags);
		work = *workp;
		*workp = 0;
		local_irq_restore(flags);
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp != 0)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			*statusp = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End CPU kthread@rcu_yield");
			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
			trace_rcu_utilization("Start CPU kthread@rcu_yield");
			spincnt = 0;
		}
	}
	*statusp = RCU_KTHREAD_STOPPED;
	trace_rcu_utilization("End CPU kthread@term");
	return 0;
}
/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task. There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online. We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
 * is online. If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
		return 0;
	t = kthread_create_on_node(rcu_cpu_kthread,
				   (void *)(long)cpu,
				   cpu_to_node(cpu),
				   "rcuc/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	if (cpu_online(cpu))
		kthread_bind(t, cpu);
	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	per_cpu(rcu_cpu_kthread_task, cpu) = t;
	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed. We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp = (struct rcu_node *)arg;
	struct sched_param sp;
	struct task_struct *t;

	for (;;) {
		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
		rcu_wait(atomic_read(&rnp->wakemask) != 0);
		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		mask = atomic_xchg(&rnp->wakemask, 0);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
			if ((mask & 0x1) == 0)
				continue;
			preempt_disable();
			t = per_cpu(rcu_cpu_kthread_task, cpu);
			if (!cpu_online(cpu) || t == NULL) {
				preempt_enable();
				continue;
			}
			per_cpu(rcu_cpu_has_work, cpu) = 1;
			sp.sched_priority = RCU_KTHREAD_PRIO;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
			preempt_enable();
		}
	}
	/* NOTREACHED */
	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
	return 0;
}
/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question. The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set; use -1 if there is
 * no outgoing CPU. If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	cpumask_var_t cm;
	int cpu;
	unsigned long mask = rnp->qsmaskinit;

	if (rnp->node_kthread_task == NULL)
		return;
	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	cpumask_clear(cm);
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
	rcu_boost_kthread_setaffinity(rnp, cm);
	free_cpumask_var(cm);
}
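/*
 * Editor's worked example (not in the original file): for a leaf node
 * with grplo == 0, grphi == 3, and qsmaskinit == 0xb (CPUs 0, 1, and 3
 * have been online), a call with outgoingcpu == 1 builds cm == {0, 3}.
 * If instead every covered CPU were excluded, the fallback above inverts
 * the node's range, letting the kthread run on any CPU outside it.
 */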
/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held. So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
						struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - &rsp->node[0];
	struct sched_param sp;
	struct task_struct *t;

	if (!rcu_scheduler_fully_active ||
	    rnp->qsmaskinit == 0)
		return 0;
	if (rnp->node_kthread_task == NULL) {
		t = kthread_create(rcu_node_kthread, (void *)rnp,
				   "rcun/%d", rnp_index);
		if (IS_ERR(t))
			return PTR_ERR(t);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->node_kthread_task = t;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		sp.sched_priority = 99;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	}
	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}
/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	int cpu;
	struct rcu_node *rnp;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu) {
		per_cpu(rcu_cpu_has_work, cpu) = 0;
		if (cpu_online(cpu))
			(void)rcu_spawn_one_cpu_kthread(cpu);
	}
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active) {
		(void)rcu_spawn_one_cpu_kthread(cpu);
		if (rnp->node_kthread_task == NULL)
			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
	}
}
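/*
 * Editor's sketch (not in the original file): rcu_prepare_kthreads() is
 * intended to be driven from the CPU_UP_PREPARE leg of a CPU-hotplug
 * notifier, roughly as below. The name example_up_prepare() is
 * hypothetical and the notifier shape is illustrative.
 */
#if 0	/* illustrative only */
static int __cpuinit example_up_prepare(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	if (action == CPU_UP_PREPARE)
		rcu_prepare_kthreads((long)hcpu); /* spawn before CPU runs */
	return NOTIFY_OK;
}
#endif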
#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary. Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}
/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly. This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier. Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word. Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the resulting
 * value, then attempts to stop all the CPUs. If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period. We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done. If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot. In this case, our work is
 * done for us, and we can simply return. Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later. Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period. We subtract
		 * 1 to get the same token that the last incrementer got.
		 * We retry after they started, so our grace period works
		 * for them, and they started after our first try, so their
		 * grace period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started) - 1;
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period. Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb(); /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
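/*
 * Editor's worked trace (not in the original file) of the ticket scheme
 * above, for two concurrent callers A and B starting from started == 0
 * and done == 0:
 *
 *	A: atomic_inc_return(&started) -> firstsnap = snap = 1
 *	B: atomic_inc_return(&started) -> firstsnap = snap = 2
 *	A: try_stop_cpus() succeeds -> cmpxchg moves done from 0 to 1
 *	B: try_stop_cpus() fails with -EAGAIN -> reads done == 1, which
 *	   has not reached B's firstsnap of 2, so B retries; on a later
 *	   pass B succeeds and advances done to 2, unless some third
 *	   caller has already pushed done past 2.
 */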
#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so. This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so. This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode. This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
						     thatcpu).dynticks);
		smp_mb(); /* Order sampling of snap with end of grace period. */
		if ((snap & 0x1) != 0) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step of pushing the remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		invoke_rcu_core();
	return c;
}
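/*
 * Editor's worked example (not in the original file): suppose this CPU
 * is the last one out of dyntick-idle and has callbacks queued. The
 * first rcu_needs_cpu() call arms rcu_dyntick_drain = 5 and does one
 * flush pass; each subsequent call from the idle loop decrements the
 * counter and flushes again. If callbacks persist when the counter
 * reaches zero, rcu_dyntick_holdoff is set to the current jiffy, so
 * further calls during that jiffy fall straight back to
 * rcu_needs_cpu_quick_check().
 */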
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */