rcutiny_plugin.h 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113
  1. /*
  2. * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
  3. * Internal non-public definitions that provide either classic
  4. * or preemptible semantics.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19. *
  20. * Copyright (c) 2010 Linaro
  21. *
  22. * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  23. */
  24. #include <linux/kthread.h>
  25. #include <linux/module.h>
  26. #include <linux/debugfs.h>
  27. #include <linux/seq_file.h>
/*
 * Global control block for one flavor of the rcupdate callback mechanism.
 * Callbacks live on a single singly linked list; the two tail pointers
 * partition it into "done" callbacks (ready to invoke) and callbacks
 * still waiting for a grace period.
 */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	/* The RCU_TRACE() fields below exist only when tracing is built in. */
	RCU_TRACE(long qlen);			/* Number of pending CBs. */
	RCU_TRACE(unsigned long gp_start);	/* Start time for stalls. */
	RCU_TRACE(unsigned long ticks_this_gp);	/* Statistic for stalls. */
	RCU_TRACE(unsigned long jiffies_stall);	/* Jiffies at next stall. */
	RCU_TRACE(char *name);			/* Name of RCU type. */
};
/*
 * Definition for rcupdate control block for the -sched flavor.
 * Both tail pointers start out referencing the empty list head,
 * i.e., the list is initially empty.
 */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail = &rcu_sched_ctrlblk.rcucblist,
	.curtail = &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};
/* Control block for the -bh flavor; same empty-list initialization. */
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail = &rcu_bh_ctrlblk.rcucblist,
	.curtail = &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Nonzero once the scheduler is running; consulted by the early-boot
 * fastpath in synchronize_rcu() below.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  54. #ifdef CONFIG_RCU_TRACE
  55. static void check_cpu_stall(struct rcu_ctrlblk *rcp)
  56. {
  57. unsigned long j;
  58. unsigned long js;
  59. if (rcu_cpu_stall_suppress)
  60. return;
  61. rcp->ticks_this_gp++;
  62. j = jiffies;
  63. js = rcp->jiffies_stall;
  64. if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
  65. pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
  66. rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
  67. jiffies - rcp->gp_start, rcp->qlen);
  68. dump_stack();
  69. }
  70. if (*rcp->curtail && ULONG_CMP_GE(j, js))
  71. rcp->jiffies_stall = jiffies +
  72. 3 * rcu_jiffies_till_stall_check() + 3;
  73. else if (ULONG_CMP_GE(j, js))
  74. rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
  75. }
  76. static void check_cpu_stall_preempt(void);
  77. #endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Reset the stall-warning bookkeeping for the specified flavor at the
 * start of a new grace period.  Compiles to an empty function unless
 * CONFIG_RCU_TRACE is set, since the stall fields exist only then.
 */
static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
{
#ifdef CONFIG_RCU_TRACE
	rcp->ticks_this_gp = 0;
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
#endif /* #ifdef CONFIG_RCU_TRACE */
}
/*
 * Run the stall checks for all three RCU flavors.
 * NOTE(review): RCU_TRACE() presumably compiles its argument away when
 * CONFIG_RCU_TRACE is not set — confirm against the RCU_TRACE definition.
 */
static void check_cpu_stalls(void)
{
	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
	RCU_TRACE(check_cpu_stall_preempt());
}
  92. #ifdef CONFIG_TINY_PREEMPT_RCU
  93. #include <linux/delay.h>
/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last task that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority-boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/*  If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;	/* Number of GPs started. */
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
/*
 * The single preemptible-RCU control block.  All three tail pointers
 * (done, current-GP, and next-GP) start out referencing the empty
 * callback list head, and the blocked-tasks list starts out empty.
 */
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};
  166. static int rcu_preempted_readers_exp(void);
  167. static void rcu_report_exp_done(void);
  168. /*
  169. * Return true if the CPU has not yet responded to the current grace period.
  170. */
  171. static int rcu_cpu_blocking_cur_gp(void)
  172. {
  173. return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
  174. }
/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.  ;-)
 *
 * Returns zero if there are no running readers.  Returns a positive
 * number if there is at least one reader within its RCU read-side
 * critical section.  Returns a negative number if an outermost reader
 * is in the midst of exiting from its RCU read-side critical section.
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}
  193. /*
  194. * Check for preempted RCU readers blocking any grace period.
  195. * If the caller needs a reliable answer, it must disable hard irqs.
  196. */
  197. static int rcu_preempt_blocked_readers_any(void)
  198. {
  199. return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
  200. }
  201. /*
  202. * Check for preempted RCU readers blocking the current grace period.
  203. * If the caller needs a reliable answer, it must disable hard irqs.
  204. */
  205. static int rcu_preempt_blocked_readers_cgp(void)
  206. {
  207. return rcu_preempt_ctrlblk.gp_tasks != NULL;
  208. }
  209. /*
  210. * Return true if another preemptible-RCU grace period is needed.
  211. */
  212. static int rcu_preempt_needs_another_gp(void)
  213. {
  214. return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
  215. }
  216. /*
  217. * Return true if a preemptible-RCU grace period is in progress.
  218. * The caller must disable hardirqs.
  219. */
  220. static int rcu_preempt_gp_in_progress(void)
  221. {
  222. return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
  223. }
  224. /*
  225. * Advance a ->blkd_tasks-list pointer to the next entry, instead
  226. * returning NULL if at the end of the list.
  227. */
  228. static struct list_head *rcu_next_node_entry(struct task_struct *t)
  229. {
  230. struct list_head *np;
  231. np = t->rcu_node_entry.next;
  232. if (np == &rcu_preempt_ctrlblk.blkd_tasks)
  233. np = NULL;
  234. return np;
  235. }
  236. #ifdef CONFIG_RCU_TRACE
  237. #ifdef CONFIG_RCU_BOOST
  238. static void rcu_initiate_boost_trace(void);
  239. #endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Dump additional statistics for TINY_PREEMPT_RCU to the debugfs
 * seq_file.  The "X."[cond] expressions index a two-character string:
 * they print the letter when cond is 0 and '.' when cond is 1 (or
 * vice versa), giving a compact flag display.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   " ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   " balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}
  272. #endif /* #ifdef CONFIG_RCU_TRACE */
#ifdef CONFIG_RCU_BOOST
#include "rtmutex_common.h"

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

/*
 * Controls for rcu_kthread() kthread.
 * NOTE(review): rcu_kthread_task/have_rcu_kthread_work are not referenced
 * in this chunk — presumably used by rcu_kthread() elsewhere in the file.
 */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;
/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 *
 * Returns nonzero if there might still be tasks in need of boosting
 * (either list non-NULL on exit), zero otherwise.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;	/* Proxy lock lives on our stack. */
	struct task_struct *t;
	struct list_head *tb;

	/* Cheap check with irqs still enabled: anything to boost at all? */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */
	local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	local_irq_restore(flags);
	rt_mutex_lock(&mtx);	/* Blocks until t drops the proxy lock. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	/* Report whether further boosting passes may be needed. */
	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		/* Nothing is blocking either GP flavor: nothing to boost. */
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	/*
	 * Boost if an expedited GP is waiting, or if a normal GP has
	 * blocked readers, boosting has not already begun, and the
	 * boost-delay deadline has passed.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();
	} else {
		/* Not yet time to boost; record the reason when tracing. */
		RCU_TRACE(rcu_initiate_boost_trace());
	}
	return 1;
}
/* Boost delay converted from milliseconds of config into jiffies. */
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period:
 * defer boosting until RCU_BOOST_DELAY_JIFFIES from now.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period (same return contract as the boosting version).
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* else #ifdef CONFIG_RCU_BOOST */
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do. */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks: the GP is over, so curtail CBs become done. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}
/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 * A new GP starts only when none is in progress and callbacks are
 * waiting beyond the current-GP tail.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
		reset_cpu_stall_ticks(&rcu_preempt_ctrlblk.rcb);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise
 * before the element referenced by ->gp_tasks (or at the tail if
 * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 * Capture "empty" states first so we can tell below
		 * whether this removal changed them.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted: release the proxy rt_mutex. */
	if (t->rcu_boost_mutex != NULL) {
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
		rt_mutex_unlock(rbmp);
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	/* QS if a GP is in progress and no reader is blocking this CPU. */
	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	/* Invoke callbacks if any have been marked done. */
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	/* Ask the running reader to report a QS at rcu_read_unlock(). */
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}
/*
 * Process callbacks for preemptible RCU by delegating to the common
 * per-flavor callback processor with the preemptible control block.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 * The callback is appended at ->nexttail (the next-GP segment) with
 * hard irqs disabled to exclude the scheduling-clock interrupt.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp(); /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	/* Calling this from within a read-side critical section deadlocks. */
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Early boot: only one task, so a GP is trivially complete. */
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	/* No blocked readers means the grace period is already over. */
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
/* Wait queue, generation counter, and mutex for expedited grace periods. */
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
  685. /*
  686. * Return non-zero if there are any tasks in RCU read-side critical
  687. * sections blocking the current preemptible-RCU expedited grace period.
  688. * If there is no preemptible-RCU expedited grace period currently in
  689. * progress, returns zero unconditionally.
  690. */
  691. static int rcu_preempted_readers_exp(void)
  692. {
  693. return rcu_preempt_ctrlblk.exp_tasks != NULL;
  694. }
  695. /*
  696. * Report the exit from RCU read-side critical section for the last task
  697. * that queued itself during or before the current expedited preemptible-RCU
  698. * grace period.
  699. */
static void rcu_report_exp_done(void)
{
	/* Unblock the synchronize_rcu_expedited() caller waiting in wait_event(). */
	wake_up(&sync_rcu_preempt_exp_wq);
}
  704. /*
  705. * Wait for an rcu-preempt grace period, but expedite it. The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
  707. * illegal for a task to invoke synchronize_rcu_expedited() while in a
  708. * preemptible-RCU read-side critical section. Therefore, any such
  709. * critical sections must correspond to blocked tasks, which must therefore
  710. * be on the ->blkd_tasks list. So just record the current head of the
  711. * list in the ->exp_tasks pointer, and wait for all tasks including and
  712. * after the task pointed to by ->exp_tasks to drain.
  713. */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */
	/* Calling this from within a read-side critical section is illegal. */
	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	/* Counter moved past our snapshot: a full exp GP completed meanwhile. */
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL; /* Empty list: nothing blocks this GP. */

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp()) {
		local_irq_restore(flags);
	} else {
		/* Boost the blocked readers so they drain sooner. */
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
  756. EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  757. /*
  758. * Does preemptible RCU need the CPU to stay out of dynticks mode?
  759. */
  760. int rcu_preempt_needs_cpu(void)
  761. {
  762. return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
  763. }
  764. #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
  765. #ifdef CONFIG_RCU_TRACE
  766. /*
  767. * Because preemptible RCU does not exist, it is not necessary to
  768. * dump out its statistics.
  769. */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	/* Intentionally empty: no preemptible RCU, so no statistics to dump. */
}
  773. #endif /* #ifdef CONFIG_RCU_TRACE */
  774. /*
  775. * Because preemptible RCU does not exist, it never has any callbacks
  776. * to check.
  777. */
static void rcu_preempt_check_callbacks(void)
{
	/* Intentionally empty: no preemptible RCU, hence no callbacks to check. */
}
  781. /*
  782. * Because preemptible RCU does not exist, it never has any callbacks
  783. * to remove.
  784. */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	/* Intentionally empty: no preemptible RCU, hence no callbacks to remove. */
}
  788. /*
  789. * Because preemptible RCU does not exist, it never has any callbacks
  790. * to process.
  791. */
static void rcu_preempt_process_callbacks(void)
{
	/* Intentionally empty: no preemptible RCU, hence no callbacks to process. */
}
  795. #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
  796. #ifdef CONFIG_RCU_BOOST
  797. /*
  798. * Wake up rcu_kthread() to process callbacks now eligible for invocation
  799. * or to boost readers.
  800. */
static void invoke_rcu_callbacks(void)
{
	/* Set the flag before waking so rcu_kthread() cannot miss the work. */
	have_rcu_kthread_work = 1;
	/* NULL until rcu_spawn_kthreads() runs; early-boot work waits for it. */
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}
  807. #ifdef CONFIG_RCU_TRACE
  808. /*
  809. * Is the current CPU running the RCU-callbacks kthread?
  810. * Caller must have preemption disabled.
  811. */
  812. static bool rcu_is_callbacks_kthread(void)
  813. {
  814. return rcu_kthread_task == current;
  815. }
  816. #endif /* #ifdef CONFIG_RCU_TRACE */
  817. /*
  818. * This kthread invokes RCU callbacks whose grace periods have
  819. * elapsed. It is awakened as needed, and takes the place of the
  820. * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
  821. * This is a kthread, but it is never stopped, at least not until
  822. * the system goes down.
  823. */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		/* rcu_boost() reports whether more boosting remains to do. */
		morework = rcu_boost();
		/*
		 * Atomically (w.r.t. irqs) consume the work flag and replace
		 * it with any leftover boosting work, so a concurrent
		 * invoke_rcu_callbacks() from irq context is not lost.
		 */
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* Not reached, but needed to shut gcc up. */
}
  843. /*
  844. * Spawn the kthread that invokes RCU callbacks.
  845. */
  846. static int __init rcu_spawn_kthreads(void)
  847. {
  848. struct sched_param sp;
  849. rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
  850. sp.sched_priority = RCU_BOOST_PRIO;
  851. sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
  852. return 0;
  853. }
  854. early_initcall(rcu_spawn_kthreads);
  855. #else /* #ifdef CONFIG_RCU_BOOST */
  856. /* Hold off callback invocation until early_initcall() time. */
  857. static int rcu_scheduler_fully_active __read_mostly;
  858. /*
  859. * Start up softirq processing of callbacks.
  860. */
  861. void invoke_rcu_callbacks(void)
  862. {
  863. if (rcu_scheduler_fully_active)
  864. raise_softirq(RCU_SOFTIRQ);
  865. }
  866. #ifdef CONFIG_RCU_TRACE
  867. /*
  868. * There is no callback kthread, so this thread is never it.
  869. */
static bool rcu_is_callbacks_kthread(void)
{
	/* No callback kthread exists without CONFIG_RCU_BOOST. */
	return false;
}
  874. #endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Enable softirq-based callback processing and flush any callbacks
 * that were queued before the scheduler was fully operational.
 */
static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	/* Register the handler before raising the softirq that uses it. */
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);
  883. #endif /* #else #ifdef CONFIG_RCU_BOOST */
  884. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  885. #include <linux/kernel_stat.h>
  886. /*
  887. * During boot, we forgive RCU lockdep issues. After this function is
  888. * invoked, we start taking RCU lockdep issues seriously.
  889. */
void __init rcu_scheduler_starting(void)
{
	/* This must run before the first context switch of the boot sequence. */
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}
  895. #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  896. #ifdef CONFIG_RCU_TRACE
  897. #ifdef CONFIG_RCU_BOOST
/*
 * Record, in priority order, the reason why rcu_initiate_boost()
 * balked at boosting.  Branch order matters: each counter only
 * accumulates cases not already covered by an earlier test.
 */
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;	/* No blocked readers. */
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;	/* No GP to boost for. */
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;	/* Boosting already underway. */
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;		/* Too early to boost. */
	else
		rcu_preempt_ctrlblk.n_balk_nos++;		/* Any other reason. */
}
  912. #endif /* #ifdef CONFIG_RCU_BOOST */
  913. static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
  914. {
  915. unsigned long flags;
  916. local_irq_save(flags);
  917. rcp->qlen -= n;
  918. local_irq_restore(flags);
  919. }
  920. /*
  921. * Dump statistics for TINY_RCU, such as they are.
  922. */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	/* Preemptible-RCU stats first (no-op stub when !TINY_PREEMPT_RCU). */
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}
/* debugfs open hook: wire the "rcudata" file to the seq_file machinery. */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
/* File operations for the debugfs "rcudata" statistics file. */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  941. static struct dentry *rcudir;
  942. static int __init rcutiny_trace_init(void)
  943. {
  944. struct dentry *retval;
  945. rcudir = debugfs_create_dir("rcu", NULL);
  946. if (!rcudir)
  947. goto free_out;
  948. retval = debugfs_create_file("rcudata", 0444, rcudir,
  949. NULL, &show_tiny_stats_fops);
  950. if (!retval)
  951. goto free_out;
  952. return 0;
  953. free_out:
  954. debugfs_remove_recursive(rcudir);
  955. return 1;
  956. }
/* Tear down the entire debugfs "rcu" directory on module unload. */
static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}
  961. module_init(rcutiny_trace_init);
  962. module_exit(rcutiny_trace_cleanup);
  963. MODULE_AUTHOR("Paul E. McKenney");
  964. MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
  965. MODULE_LICENSE("GPL");
/* Run CPU-stall checking on the preemptible-RCU flavor, if it exists. */
static void check_cpu_stall_preempt(void)
{
#ifdef CONFIG_TINY_PREEMPT_RCU
	check_cpu_stall(&rcu_preempt_ctrlblk.rcb);
#endif /* #ifdef CONFIG_TINY_PREEMPT_RCU */
}
  972. #endif /* #ifdef CONFIG_RCU_TRACE */