rcutiny_plugin.h

/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
        struct rcu_ctrlblk rcb; /* curtail: ->next ptr of last CB for GP. */
        struct rcu_head **nexttail;
                                /* Tasks blocked in a preemptible RCU */
                                /*  read-side critical section while a */
                                /*  preemptible-RCU grace period is in */
                                /*  progress must wait for a later grace */
                                /*  period.  This pointer points to the */
                                /*  ->next pointer of the last task that */
                                /*  must wait for a later grace period, or */
                                /*  to &->rcb.rcucblist if there is no */
                                /*  such task. */
        struct list_head blkd_tasks;
                                /* Tasks blocked in RCU read-side critical */
                                /*  section.  Tasks are placed at the head */
                                /*  of this list and age towards the tail. */
        struct list_head *gp_tasks;
                                /* Pointer to the first task blocking the */
                                /*  current grace period, or NULL if there */
                                /*  is no such task. */
        struct list_head *exp_tasks;
                                /* Pointer to first task blocking the */
                                /*  current expedited grace period, or NULL */
                                /*  if there is no such task.  If there */
                                /*  is no current expedited grace period, */
                                /*  then there cannot be any such task. */
        u8 gpnum;               /* Current grace period. */
        u8 gpcpu;               /* Last grace period blocked by the CPU. */
        u8 completed;           /* Last grace period completed. */
                                /*  If all three are equal, RCU is idle. */
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
        .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
        .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
        .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
        .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
        return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 */
static int rcu_preempt_running_reader(void)
{
        return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
        return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
        return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
        return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
        return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
        /* Record both CPU and task as having responded to current GP. */
        rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

        /*
         * If there is no GP, or if blocked readers are still blocking GP,
         * then there is nothing more to do.
         */
        if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
                return;

        /* Advance callbacks. */
        rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
        rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
        rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

        /* If there are no blocked readers, next GP is done instantly. */
        if (!rcu_preempt_blocked_readers_any())
                rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

        /* If there are done callbacks, cause them to be invoked. */
        if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
                invoke_rcu_cbs();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
        if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

                /* Official start of GP. */
                rcu_preempt_ctrlblk.gpnum++;

                /* Any blocked RCU readers block new GP. */
                if (rcu_preempt_blocked_readers_any())
                        rcu_preempt_ctrlblk.gp_tasks =
                                rcu_preempt_ctrlblk.blkd_tasks.next;

                /* If there is no running reader, CPU is done with GP. */
                if (!rcu_preempt_running_reader())
                        rcu_preempt_cpu_qs();
        }
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise, we
 * enqueue before the element referenced by ->gp_tasks (or at the tail
 * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
        struct task_struct *t = current;
        unsigned long flags;

        local_irq_save(flags); /* must exclude scheduler_tick(). */
        if (rcu_preempt_running_reader() &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.
                 */
                list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
                if (rcu_cpu_blocking_cur_gp())
                        rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that current grace period continues to be blocked.
         */
        rcu_preempt_cpu_qs();
        local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        unsigned long flags;
        struct list_head *np;
        int special;

        /*
         * NMI handlers cannot block and cannot safely manipulate state.
         * They therefore cannot possibly be special, so just leave.
         */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS)
                rcu_preempt_cpu_qs();

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the ->blkd_tasks list and adjust
                 * any pointers that might have been referencing it.
                 */
                empty = !rcu_preempt_blocked_readers_cgp();
                empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
                np = t->rcu_node_entry.next;
                if (np == &rcu_preempt_ctrlblk.blkd_tasks)
                        np = NULL;
                list_del(&t->rcu_node_entry);
                if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
                        rcu_preempt_ctrlblk.gp_tasks = np;
                if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
                        rcu_preempt_ctrlblk.exp_tasks = np;
                INIT_LIST_HEAD(&t->rcu_node_entry);

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on the CPU, report the quiescent state
                 * and start a new grace period if needed.
                 */
                if (!empty && !rcu_preempt_blocked_readers_cgp()) {
                        rcu_preempt_cpu_qs();
                        rcu_preempt_start_gp();
                }

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to wake up the waiting task.
                 */
                if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
                        rcu_report_exp_done();
        }
        local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
        --t->rcu_read_lock_nesting;
        barrier();  /* decrement before load of ->rcu_read_unlock_special */
        if (t->rcu_read_lock_nesting == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
        WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

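/*
 * Illustrative read-side usage sketch.  The RCU-protected pointer "gp" and
 * the helper do_something_with() are hypothetical and not part of this file;
 * this is only a reminder of how the rcu_read_lock()/rcu_read_unlock() pair
 * above is normally used:
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p != NULL)
 *              do_something_with(p);
 *      rcu_read_unlock();
 */
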
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
        struct task_struct *t = current;

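        /*
         * If a grace period is in progress and either no reader is running
         * or this CPU has already responded, note a quiescent state.
         */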
        if (rcu_preempt_gp_in_progress() &&
            (!rcu_preempt_running_reader() ||
             !rcu_cpu_blocking_cur_gp()))
                rcu_preempt_cpu_qs();
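        /* If callbacks' grace periods have ended, cause them to be invoked. */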
        if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
            rcu_preempt_ctrlblk.rcb.donetail)
                invoke_rcu_cbs();
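        /*
         * If a running reader is all that is holding up this CPU's response
         * to the current grace period, ask it to report a quiescent state
         * at its next outermost rcu_read_unlock().
         */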
        if (rcu_preempt_gp_in_progress() &&
            rcu_cpu_blocking_cur_gp() &&
            rcu_preempt_running_reader())
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
        if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
                rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcu_preempt_ctrlblk.nexttail = head;
        rcu_preempt_ctrlblk.nexttail = &head->next;
        rcu_preempt_start_gp();  /* checks to see if GP needed. */
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

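/*
 * Illustrative call_rcu() usage sketch.  The struct foo, its embedded
 * rcu_head "rh", and foo_reclaim() are hypothetical and not part of this
 * file; they only show the intended shape of a deferred-free callback:
 *
 *      static void foo_reclaim(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct foo, rh));
 *      }
 *
 *      call_rcu(&fp->rh, foo_reclaim);
 */
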
void rcu_barrier(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (!rcu_scheduler_active)
                return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

        WARN_ON_ONCE(rcu_preempt_running_reader());
        if (!rcu_preempt_blocked_readers_any())
                return;

        /* Once we get past the fastpath checks, same code as rcu_barrier(). */
        rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

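/*
 * Illustrative update-side sketch.  The pointers "gp", "oldp", and "newp"
 * are hypothetical and not part of this file; they only show the usual
 * replace-then-wait-then-free pattern that synchronize_rcu() supports:
 *
 *      oldp = gp;
 *      rcu_assign_pointer(gp, newp);
 *      synchronize_rcu();
 *      kfree(oldp);
 */
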
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
        return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
        wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
        unsigned long snap;

        barrier();  /* ensure prior action seen before grace period. */

        WARN_ON_ONCE(rcu_preempt_running_reader());

        /*
         * Acquire lock so that there is only one preemptible RCU grace
         * period in flight.  Of course, if someone does the expedited
         * grace period for us while we are acquiring the lock, just leave.
         */
        snap = sync_rcu_preempt_exp_count + 1;
        mutex_lock(&sync_rcu_preempt_exp_mutex);
        if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
                goto unlock_mb_ret;  /* Others did our work for us. */

        local_irq_save(flags);

        /*
         * All RCU readers have to already be on blkd_tasks because
         * we cannot legally be executing in an RCU read-side critical
         * section.
         */

        /* Snapshot current head of ->blkd_tasks list. */
        rpcp->exp_tasks = rpcp->blkd_tasks.next;
        if (rpcp->exp_tasks == &rpcp->blkd_tasks)
                rpcp->exp_tasks = NULL;
        local_irq_restore(flags);

        /* Wait for tail of ->blkd_tasks list to drain. */
        if (rcu_preempted_readers_exp())
                wait_event(sync_rcu_preempt_exp_wq,
                           !rcu_preempted_readers_exp());

        /* Clean up and exit. */
        barrier();  /* ensure expedited GP seen before counter increment. */
        sync_rcu_preempt_exp_count++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
        barrier();  /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
        if (!rcu_preempt_running_reader())
                rcu_preempt_cpu_qs();
        return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
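        /*
         * Pretend this is the outermost rcu_read_unlock() so that the
         * unlock below performs the needed cleanup, dequeueing the task
         * from ->blkd_tasks and reporting any quiescent state.
         */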
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */