rcutiny_plugin.h
  1. /*
  2. * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
  3. * Internal non-public definitions that provide either classic
  4. * or preemptible semantics.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19. *
  20. * Copyright (c) 2010 Linaro
  21. *
  22. * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  23. */
  24. #include <linux/kthread.h>
  25. #include <linux/module.h>
  26. #include <linux/debugfs.h>
  27. #include <linux/seq_file.h>
/*
 * Global control variables for the rcupdate callback mechanism.
 * A single callback list with two interior tail pointers partitions
 * the list into "done" callbacks (ready to invoke) and pending ones.
 */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs (tracing only). */
	RCU_TRACE(char *name);		/* Name of RCU type (tracing only). */
};
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	/* Empty list: both tail pointers reference the list head itself. */
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};
/* Control block for the rcu_bh flavor; same empty-list initialization. */
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Nonzero once the scheduler is running; used by lockdep-based checks. */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  51. #ifdef CONFIG_TINY_PREEMPT_RCU
  52. #include <linux/delay.h>
/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;		/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
					/* Tasks blocked in a preemptible RCU */
					/* read-side critical section while a */
					/* preemptible-RCU grace period is in */
					/* progress must wait for a later grace */
					/* period.  This pointer points to the */
					/* ->next pointer of the last task that */
					/* must wait for a later grace period, or */
					/* to &->rcb.rcucblist if there is no */
					/* such task. */
	struct list_head blkd_tasks;
					/* Tasks blocked in RCU read-side critical */
					/* section.  Tasks are placed at the head */
					/* of this list and age towards the tail. */
	struct list_head *gp_tasks;
					/* Pointer to the first task blocking the */
					/* current grace period, or NULL if there */
					/* is no such task. */
	struct list_head *exp_tasks;
					/* Pointer to first task blocking the */
					/* current expedited grace period, or NULL */
					/* if there is no such task.  If there */
					/* is no current expedited grace period, */
					/* then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
					/* Pointer to first task that needs to be */
					/* priority-boosted, or NULL if no priority */
					/* boosting is needed.  If there is no */
					/* current or expedited grace period, there */
					/* can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;			/* Current grace period. */
	u8 gpcpu;			/* Last grace period blocked by the CPU. */
	u8 completed;			/* Last grace period completed. */
					/* If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time;	/* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;	/* Number of grace periods started. */
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
					/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
					/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
					/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
					/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
					/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
					/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
					/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
					/* Refused to boost: not sure why, though. */
					/* This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
/* Initially empty: all three tail pointers reference the list head. */
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail	= &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail	= &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail	= &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks	= LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};
/* Forward references for the expedited-grace-period machinery below. */
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);
  127. /*
  128. * Return true if the CPU has not yet responded to the current grace period.
  129. */
  130. static int rcu_cpu_blocking_cur_gp(void)
  131. {
  132. return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
  133. }
/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time. ;-)
 *
 * Returns zero if there are no running readers.  Returns a positive
 * number if there is at least one reader within its RCU read-side
 * critical section.  Returns a negative number if an outermost reader
 * is in the midst of exiting from its RCU read-side critical section.
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}
  152. /*
  153. * Check for preempted RCU readers blocking any grace period.
  154. * If the caller needs a reliable answer, it must disable hard irqs.
  155. */
  156. static int rcu_preempt_blocked_readers_any(void)
  157. {
  158. return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
  159. }
  160. /*
  161. * Check for preempted RCU readers blocking the current grace period.
  162. * If the caller needs a reliable answer, it must disable hard irqs.
  163. */
  164. static int rcu_preempt_blocked_readers_cgp(void)
  165. {
  166. return rcu_preempt_ctrlblk.gp_tasks != NULL;
  167. }
  168. /*
  169. * Return true if another preemptible-RCU grace period is needed.
  170. */
  171. static int rcu_preempt_needs_another_gp(void)
  172. {
  173. return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
  174. }
  175. /*
  176. * Return true if a preemptible-RCU grace period is in progress.
  177. * The caller must disable hardirqs.
  178. */
  179. static int rcu_preempt_gp_in_progress(void)
  180. {
  181. return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
  182. }
  183. /*
  184. * Advance a ->blkd_tasks-list pointer to the next entry, instead
  185. * returning NULL if at the end of the list.
  186. */
  187. static struct list_head *rcu_next_node_entry(struct task_struct *t)
  188. {
  189. struct list_head *np;
  190. np = t->rcu_node_entry.next;
  191. if (np == &rcu_preempt_ctrlblk.blkd_tasks)
  192. np = NULL;
  193. return np;
  194. }
#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   " ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   " balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#endif /* #ifdef CONFIG_RCU_TRACE */
#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO

/* Controls for rcu_kthread() kthread. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;
/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
 * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
 *
 * Returns nonzero if there remain further tasks needing boosting.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	/* Fastpath check before taking the trouble of disabling irqs. */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we are preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);	/* Blocks here until t drops the proxy lock. */
	rt_mutex_unlock(&mtx);	/* Keep lockdep happy. */

	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.  If there is an expedited boost in progress,
 * we wait for it to complete.
 *
 * If there are no blocked readers blocking the current grace period,
 * return 0 to let the caller know, otherwise return 1.  Note that this
 * return value is independent of whether or not boosting was done.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	/* Boost only if not already boosting and the delay has elapsed. */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();
	} else
		RCU_TRACE(rcu_initiate_boost_trace());
	return 1;
}
/* Defer boosting for this many jiffies after a grace period starts. */
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
  331. #else /* #ifdef CONFIG_RCU_BOOST */
/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}
/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}
  347. #endif /* else #ifdef CONFIG_RCU_BOOST */
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, the only way a grace
 * period can end is if the CPU is in a quiescent state.  The reason is
 * that a blocked preemptible-RCU reader can exit its critical section
 * only if the CPU is running it at the time.  Therefore, when the
 * last task blocking the current grace period exits its RCU read-side
 * critical section, neither the CPU nor blocked tasks will be stopping
 * the current grace period.  (In contrast, SMP implementations
 * might have CPUs running in RCU read-side critical sections that
 * block later grace periods -- but this is not possible given only
 * one CPU.)
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do.  */
	if (!rcu_preempt_gp_in_progress())
		return;
	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks: end the GP and promote the tail pointers. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}
/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * If the task started after the current grace period began, as recorded
 * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise
 * before the element referenced by ->gp_tasks (or at the tail if
 * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the ->gp_tasks pointer becomes
 * NULL.
 *
 * Caller must disable preemption.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that current grace period continues to be blocked.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted. */
	if (t->rcu_boost_mutex != NULL) {
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
		rt_mutex_unlock(rbmp);
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the rcu_preempt_ctrlblk structure, which is
 * checked elsewhere.  This is called from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	/* QS if no GP-blocking reader is running, or CPU already checked in. */
	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	/* Kick off callback invocation if any "done" callbacks are queued. */
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	/* Ask the running reader to report a QS at rcu_read_unlock() time. */
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this is invoked from rcu_process_callbacks() to
 * handle that case.  Of course, it is invoked for all flavors of
 * RCU, but RCU callbacks can appear only on one of the lists, and
 * neither ->nexttail nor ->donetail can possibly be NULL, so there
 * is no need for an explicit check.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}
/*
 * Process callbacks for preemptible RCU by delegating to the common
 * callback processor with the rcu_preempt control block.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	/* Enqueue with irqs off so the list cannot change under us. */
	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
/* Coordination for expedited grace periods: waitqueue, count, and lock. */
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
  639. /*
  640. * Return non-zero if there are any tasks in RCU read-side critical
  641. * sections blocking the current preemptible-RCU expedited grace period.
  642. * If there is no preemptible-RCU expedited grace period currently in
  643. * progress, returns zero unconditionally.
  644. */
  645. static int rcu_preempted_readers_exp(void)
  646. {
  647. return rcu_preempt_ctrlblk.exp_tasks != NULL;
  648. }
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  Wakes the task sleeping in synchronize_rcu_expedited().
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must therefore
 * be on the ->blkd_tasks list.  So just record the current head of the
 * list in the ->exp_tasks pointer, and wait for all tasks including and
 * after the task pointed to by ->exp_tasks to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire lock so that there is only one preemptible RCU grace
	 * period in flight.  Of course, if someone does the expedited
	 * grace period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.
	 */

	/* Snapshot current head of ->blkd_tasks list. */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for tail of ->blkd_tasks list to drain. */
	if (!rcu_preempted_readers_exp())
		local_irq_restore(flags);
	else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  711. /*
  712. * Does preemptible RCU need the CPU to stay out of dynticks mode?
  713. */
  714. int rcu_preempt_needs_cpu(void)
  715. {
  716. if (!rcu_preempt_running_reader())
  717. rcu_preempt_cpu_qs();
  718. return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
  719. }
  720. #else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
  721. #ifdef CONFIG_RCU_TRACE
/*
 * Because preemptible RCU does not exist (!CONFIG_TINY_PREEMPT_RCU),
 * it is not necessary to dump out its statistics.  This stub keeps
 * show_tiny_stats() unconditional.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}
  729. #endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.  Stub for the !CONFIG_TINY_PREEMPT_RCU case.
 */
static void rcu_preempt_check_callbacks(void)
{
}
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.  @rcp is accepted only so the signature matches callers.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}
/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.  Stub for the !CONFIG_TINY_PREEMPT_RCU case.
 */
static void rcu_preempt_process_callbacks(void)
{
}
  751. #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
  752. #ifdef CONFIG_RCU_BOOST
/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 *
 * The work flag is set before testing and waking the kthread, so that a
 * concurrently running rcu_kthread() (which rechecks have_rcu_kthread_work
 * after each pass) cannot miss newly posted work.  The NULL check covers
 * early boot, before rcu_spawn_kthreads() has run.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}
  763. #ifdef CONFIG_RCU_TRACE
  764. /*
  765. * Is the current CPU running the RCU-callbacks kthread?
  766. * Caller must have preemption disabled.
  767. */
  768. static bool rcu_is_callbacks_kthread(void)
  769. {
  770. return rcu_kthread_task == current;
  771. }
  772. #endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		/* Sleep until invoke_rcu_callbacks() posts work. */
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		/*
		 * Consume the work flag with irqs disabled so this
		 * read-modify-write cannot interleave with a concurrent
		 * setter; any residual boosting work (morework) is carried
		 * over as the flag for the next pass.
		 */
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}
	return 0; /* Not reached, but needed to shut gcc up. */
}
/*
 * Spawn the kthread that invokes RCU callbacks and give it SCHED_FIFO
 * priority RCU_BOOST_PRIO.  Runs at early_initcall() time.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	/*
	 * NOTE(review): kthread_run() can return an ERR_PTR() on failure,
	 * which would then be passed to sched_setscheduler_nocheck() --
	 * confirm that failure is considered impossible this early in boot.
	 */
	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);
  811. #else /* #ifdef CONFIG_RCU_BOOST */
/* Hold off callback invocation until early_initcall() time. */
/* Set once by rcu_scheduler_really_started(). */
static int rcu_scheduler_fully_active __read_mostly;
  814. /*
  815. * Start up softirq processing of callbacks.
  816. */
  817. void invoke_rcu_callbacks(void)
  818. {
  819. if (rcu_scheduler_fully_active)
  820. raise_softirq(RCU_SOFTIRQ);
  821. }
  822. #ifdef CONFIG_RCU_TRACE
/*
 * There is no callback kthread in the !CONFIG_RCU_BOOST case, so this
 * thread is never it.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}
  830. #endif /* #ifdef CONFIG_RCU_TRACE */
/*
 * Mark the scheduler fully active, register the RCU softirq handler,
 * and kick it once.  The flag is set before raising the softirq so the
 * raise is not discarded by invoke_rcu_callbacks()'s gate.
 */
static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ); /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);
  839. #endif /* #else #ifdef CONFIG_RCU_BOOST */
  840. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  841. #include <linux/kernel_stat.h>
/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.  The WARN_ON
 * checks that no context switch has happened yet when this runs.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}
  851. #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  852. #ifdef CONFIG_RCU_TRACE
  853. #ifdef CONFIG_RCU_BOOST
/*
 * Record, for tracing, the reason why reader boosting was balked at.
 * The tests run in priority order, so exactly one counter is bumped
 * per call.
 */
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++; /* No blocked tasks. */
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++; /* None block a GP. */
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++; /* Already boosting. */
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++; /* Not yet time to boost. */
	else
		rcu_preempt_ctrlblk.n_balk_nos++; /* Any other reason. */
}
  868. #endif /* #ifdef CONFIG_RCU_BOOST */
/*
 * Subtract n from the traced callback-queue length of @rcp.  Interrupts
 * are disabled (raw variant) around the read-modify-write so it cannot
 * be torn by a concurrent update from irq context.
 */
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}
/*
 * Dump statistics for TINY_RCU, such as they are: the preemptible-RCU
 * stats (a no-op stub when not configured) followed by the rcu_sched
 * and rcu_bh callback-queue lengths.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}
/* seq_file open hook: bind reads of the debugfs file to show_tiny_stats(). */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
/* File operations for the debugfs "rcudata" file (single-record seq_file). */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* debugfs directory ("rcu") holding the tracing file. */
static struct dentry *rcudir;

/*
 * Create the debugfs directory "rcu" containing the read-only file
 * "rcudata".  On failure, tear down whatever was built; calling
 * debugfs_remove_recursive() with a NULL dentry on the first error
 * path is a no-op -- TODO confirm against the debugfs API.
 *
 * NOTE(review): returns 1 on failure rather than a negative errno;
 * confirm this is intentional for this initcall.
 */
static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}
/* Tear down the debugfs "rcu" directory and everything beneath it. */
static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);
MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");
  922. #endif /* #ifdef CONFIG_RCU_TRACE */