rcutiny_plugin.h

/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/*  If all three are equal, RCU is idle. */
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};
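
/*
 * Illustrative sketch (editor's annotation, not part of the original file):
 * the tail pointers above carve the single ->rcb.rcucblist into three
 * segments.  Callbacks before *donetail have waited out a full grace
 * period and may be invoked; callbacks between *donetail and *curtail
 * wait for the current grace period; callbacks between *curtail and
 * *nexttail wait for a subsequent grace period.  Each tail pointer
 * references the ->next field of the last callback in its segment, or
 * &->rcb.rcucblist when that segment and all earlier ones are empty.
 *
 *	rcucblist -> [done CBs] -> [current-GP CBs] -> [next-GP CBs] -> NULL
 *	                        ^                   ^                 ^
 *	                    donetail             curtail           nexttail
 *
 * When rcu_preempt_cpu_qs() below observes the end of a grace period, it
 * advances donetail to curtail and curtail to nexttail, promoting each
 * segment one stage closer to invocation.
 */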

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time. ;-)
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/*
	 * If there is no GP, or if blocked readers are still blocking GP,
	 * then there is nothing more to do.
	 */
	if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, make RCU_SOFTIRQ process them. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		raise_softirq(RCU_SOFTIRQ);
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise, we
 * enqueue before the element referenced by ->gp_tasks (or at the tail
 * if ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that the current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
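
/*
 * Illustrative sketch (editor's annotation, not part of the original file):
 * tasks preempted within an RCU read-side critical section sit on
 * ->blkd_tasks, newest at the head and aging toward the tail:
 *
 *	blkd_tasks (head) -> T_newest -> ... -> T_oldest -> (back to head)
 *
 * ->gp_tasks and ->exp_tasks each point at some element of this list (or
 * are NULL); every task from that element through the tail must dequeue
 * itself, in rcu_read_unlock_special() below, before the corresponding
 * normal or expedited grace period is allowed to end.
 */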

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = t->rcu_node_entry.next;
		if (np == &rcu_preempt_ctrlblk.blkd_tasks)
			np = NULL;
		list_del(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
		INIT_LIST_HEAD(&t->rcu_node_entry);

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
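
/*
 * Illustrative usage sketch (editor's annotation, not part of the original
 * file).  Readers invoke rcu_read_lock() and rcu_read_unlock(), which map
 * onto the __rcu_read_lock()/__rcu_read_unlock() primitives above, and
 * fetch RCU-protected pointers with rcu_dereference().  The names "my_data",
 * "gp", and "read_val" are made up for this example.
 *
 *	struct my_data {
 *		int val;
 *	};
 *	struct my_data *gp;
 *
 *	int read_val(void)
 *	{
 *		struct my_data *p;
 *		int ret = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		if (p != NULL)
 *			ret = p->val;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */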

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		raise_softirq(RCU_SOFTIRQ);
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from __rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
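
/*
 * Illustrative usage sketch (editor's annotation, not part of the original
 * file).  A caller embeds a struct rcu_head in its own structure and hands
 * call_rcu() a callback that frees that structure once a grace period has
 * elapsed.  The names "my_node" and "my_node_free_rcu" are made up for this
 * example.
 *
 *	struct my_node {
 *		int key;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_node_free_rcu(struct rcu_head *head)
 *	{
 *		struct my_node *node = container_of(head, struct my_node, rcu);
 *
 *		kfree(node);
 *	}
 *
 *	After unlinking "node" so that no new reader can find it:
 *		call_rcu(&node->rcu, my_node_free_rcu);
 */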

void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
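
/*
 * Illustrative usage note (editor's annotation, not part of the original
 * file).  rcu_barrier() is typically used on teardown paths, for example a
 * module exit handler that must wait for its previously queued call_rcu()
 * callbacks to finish before the memory or code they use goes away.  The
 * names "my_module_exit" and "my_cache" are made up for this example.
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		...unlink everything that readers could still reach...
 *		rcu_barrier();	(wait for pending call_rcu() callbacks)
 *		kmem_cache_destroy(my_cache);
 *	}
 */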

/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
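
/*
 * Illustrative usage sketch (editor's annotation, not part of the original
 * file).  An updater publishes a new version of an RCU-protected pointer
 * with rcu_assign_pointer(), waits for pre-existing readers with
 * synchronize_rcu(), and only then frees the old version.  This sketch
 * reuses the made-up "my_data" and "gp" names from the reader sketch
 * earlier in this file; "gp_lock" and "update_val" are likewise made up.
 *
 *	DEFINE_SPINLOCK(gp_lock);
 *
 *	void update_val(int new_val)
 *	{
 *		struct my_data *newp, *oldp;
 *
 *		newp = kmalloc(sizeof(*newp), GFP_KERNEL);
 *		if (newp == NULL)
 *			return;
 *		newp->val = new_val;
 *		spin_lock(&gp_lock);
 *		oldp = gp;
 *		rcu_assign_pointer(gp, newp);
 *		spin_unlock(&gp_lock);
 *		synchronize_rcu();	(wait for readers that might hold oldp)
 *		kfree(oldp);
 *	}
 */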

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(void)
{
	return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;
	local_irq_restore(flags);

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (rcu_preempted_readers_exp())
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */