rcutree_plugin.h

/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/stop_machine.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
	printk(KERN_INFO
	       "\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();
	rdp->passed_quiesc = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	int phase = rnp->gpnum & 0x1;

	return !list_empty(&rnp->blocked_tasks[phase]) ||
	       !list_empty(&rnp->blocked_tasks[phase + 2]);
}
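
/*
 * A note on blocked_tasks[] indexing, inferred from the code above and
 * from sync_rcu_preempt_exp_init() below: entries 0 and 1 hold tasks
 * blocking a normal grace period, selected by the low-order bit of
 * rnp->gpnum (adjusted in rcu_preempt_note_context_switch() according
 * to whether this CPU has already passed through a quiescent state),
 * while entries 2 and 3 hold tasks blocking an expedited grace period,
 * onto which entries 0 and 1 are spliced when an expedited grace
 * period starts.
 */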

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempted_readers(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb();  /* ensure expedited fastpath sees end of RCU c-s. */
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (empty)
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		else
			rcu_report_unblock_qs_rnp(rnp, flags);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
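
/*
 * For reference, a minimal reader-side sketch built on the primitives
 * above, assuming a hypothetical RCU-protected pointer "gp" and helper
 * "do_something_with()" (neither is part of this file).  A reader may
 * be preempted anywhere inside the critical section; if so, it is
 * queued on ->blocked_tasks[] as described in
 * rcu_preempt_note_context_switch():
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */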

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			sched_show_task(t);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
	}
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
	rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for the case in which all CPUs covered by
 * the specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}
	WARN_ON_ONCE(rnp != rdp->mynode &&
		     (!list_empty(&rnp->blocked_tasks[0]) ||
		      !list_empty(&rnp->blocked_tasks[1]) ||
		      !list_empty(&rnp->blocked_tasks[2]) ||
		      !list_empty(&rnp->blocked_tasks[3])));

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of gpnum value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	if (rcu_preempted_readers(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	for (i = 0; i < 4; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
		}
	}
	return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
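
/*
 * For reference, a minimal updater-side sketch built on the primitives
 * above, assuming a hypothetical element "p" on an RCU-protected list
 * guarded by a hypothetical "mylist_lock" (neither is part of this
 * file).  The synchronize_rcu() call guarantees that all readers that
 * might still hold a reference to "p" have finished before it is freed:
 *
 *	spin_lock(&mylist_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&mylist_lock);
 *	synchronize_rcu();
 *	kfree(p);
 *
 * Callers that must not block can instead embed a struct rcu_head in
 * the element and use call_rcu() with a callback that does the kfree().
 */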

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[2]) ||
	       !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp))
			break;
		if (rnp->parent == NULL) {
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int must_wait;

	raw_spin_lock(&rnp->lock);  /* irqs already disabled */
	list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
	list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
	must_wait = rcu_preempted_readers_exp(rnp);
	raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb();  /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb();  /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret;  /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret;  /* Others did our work for us. */

	/* force all RCU readers onto blocked_tasks[]. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blocked_tasks[] lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blocked_tasks[] lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb();  /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb();  /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks from dying CPU to other online CPU.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
	rcu_send_cbs_to_online(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_online(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifndef CONFIG_SMP

void synchronize_sched_expedited(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#else /* #ifndef CONFIG_SMP */

static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);

static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary.  Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb();  /* See above comment block. */
	return 0;
}

/*
 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
 * approach to force grace period to end quickly.  This consumes
 * significant time on all CPUs, and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier.  Failing to
 * observe this restriction will result in deadlock.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word.  Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs.  If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period.  We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done.  If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot.  In this case, our work is
 * done for us, and we can simply return.  Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	int firstsnap, s, snap, trycount = 0;

	/* Note that atomic_inc_return() implies full memory barrier. */
	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
	get_online_cpus();

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();

		/* No joy, try again later.  Or just synchronize_sched(). */
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}

		/* Check to see if someone else did our work for us. */
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
			smp_mb();  /* ensure test happens before caller kfree */
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period.  We subtract
		 * 1 to get the same token that the last incrementer got.
		 * We retry after they started, so our grace period works
		 * for them, and they started after our first try, so their
		 * grace period works for us.
		 */
		get_online_cpus();
		snap = atomic_read(&sync_sched_expedited_started) - 1;
		smp_mb();  /* ensure read is before try_stop_cpus(). */
	}

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period.  Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did beat us to the punch.
	 */
	do {
		s = atomic_read(&sync_sched_expedited_done);
		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
			smp_mb();  /* ensure test happens before caller kfree */
			break;
		}
	} while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
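
/*
 * A worked example of the ticket-style counters above (the values are
 * illustrative only): suppose sync_sched_expedited_started and
 * sync_sched_expedited_done both start at 4.  Task A enters and
 * increments "started" to 5, taking snap = firstsnap = 5.  Task B then
 * enters and takes snap = firstsnap = 6.  If B's try_stop_cpus()
 * succeeds first, B advances "done" to 6.  When A later reads "done"
 * and sees 6 >= its firstsnap of 5, it knows that a full grace period
 * began after its own entry and completed, so A can return without
 * stopping the CPUs itself.
 */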

#endif /* #else #ifndef CONFIG_SMP */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
 * entry is not configured, so we never do need to.
 */
static void rcu_needs_cpu_flush(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do a
 * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
 * The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int snap_nmi;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
		smp_mb();  /* Order sampling of snap with end of grace period. */
		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step pushing remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		raise_softirq(RCU_SOFTIRQ);
	return c;
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
		return;
	local_irq_save(flags);
	(void)rcu_needs_cpu(cpu);
	local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */