rcutiny.c
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
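
/*
 * rcu_dynticks_nesting tracks how far this CPU is from the idle
 * (extended quiescent) state: roughly, the low-order bits count irq
 * nesting, while DYNTICK_TASK_NEST_VALUE-sized increments count
 * process-level non-idle nesting, so the counter is zero only when the
 * CPU is fully idle from RCU's point of view.  See the DYNTICK_TASK_*
 * definitions for the exact encoding.
 */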
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long newval)
{
        if (newval) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            rcu_dynticks_nesting, newval));
                rcu_dynticks_nesting = newval;
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            rcu_dynticks_nesting, newval));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
        barrier();
        rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
        if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
            DYNTICK_TASK_NEST_VALUE)
                newval = 0;
        else
                newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        newval = rcu_dynticks_nesting - 1;
        WARN_ON_ONCE(newval < 0);
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
                rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        else
                rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
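
/*
 * A worked example of the irq-nesting arithmetic above.  With the CPU
 * idle (rcu_dynticks_nesting == 0), a hardware interrupt and a nested
 * interrupt drive the counter as follows:
 *
 *	rcu_irq_enter()		0 -> 1	(leaves the extended QS)
 *	  rcu_irq_enter()	1 -> 2	(nested, already non-idle)
 *	  rcu_irq_exit()	2 -> 1
 *	rcu_irq_exit()		1 -> 0	(re-enters the extended QS)
 *
 * Only the outermost transitions change RCU's idea of idleness, which
 * is why rcu_idle_enter_common() and rcu_idle_exit_common() above
 * special-case a zero counter value.
 */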
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 0;
}
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
                                              is_idle_task(current),
                                              rcu_is_callbacks_kthread()));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
                                      is_idle_task(current),
                                      rcu_is_callbacks_kthread()));
}
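
/*
 * The list surgery above relies on ->curtail and ->donetail being
 * pointers to the ->next field of the last queued and last
 * ready-to-invoke callback (or to ->rcucblist itself when the
 * corresponding sublist is empty).  The userspace sketch below is
 * illustrative only, not part of this file: it shows the same
 * pointer-to-pointer splice on a toy singly-linked list.
 */
#if 0	/* illustrative sketch, never compiled */
#include <stddef.h>

struct node {
        struct node *next;
};

struct queue {
        struct node *head;      /* like ->rcucblist */
        struct node **donetail; /* end of ready-to-invoke sublist */
        struct node **curtail;  /* end of the entire list */
};

/* Detach the ready-to-invoke sublist, exactly as done above. */
static struct node *splice_done(struct queue *q)
{
        struct node *list = q->head;

        q->head = *q->donetail;         /* remaining callbacks become head */
        *q->donetail = NULL;            /* terminate the detached sublist */
        if (q->curtail == q->donetail)  /* everything was ready... */
                q->curtail = &q->head;  /* ...so the queue is now empty */
        q->donetail = &q->head;
        return list;                    /* caller walks and invokes these */
}
#endif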
static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
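
/*
 * The classic updater pattern that relies on the guarantee above.  The
 * sketch below is illustrative only and not part of this file (gptr and
 * struct foo are hypothetical): unpublish the old pointer, wait for all
 * pre-existing readers with synchronize_sched(), then reclaim.
 */
#if 0	/* illustrative sketch, never compiled */
struct foo *gptr;	/* hypothetical RCU-protected pointer */

static void update_foo(struct foo *newp)
{
        struct foo *oldp = gptr;

        rcu_assign_pointer(gptr, newp); /* publish the new version */
        synchronize_sched();            /* wait for pre-existing readers */
        kfree(oldp);                    /* now safe to reclaim */
}
#endif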
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
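
/*
 * Typical caller-side usage of call_rcu_sched().  The sketch below is
 * illustrative only and not part of this file (struct foo is
 * hypothetical): embed an rcu_head in the protected structure and
 * reclaim it from a callback that recovers the enclosing structure
 * with container_of().
 */
#if 0	/* illustrative sketch, never compiled */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void free_foo_cb(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct foo, rcu));
}

static void retire_foo(struct foo *p)
{
        call_rcu_sched(&p->rcu, free_foo_cb);   /* deferred kfree() */
}
#endif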
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);