/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
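
/*
 * Illustrative usage (the values are arbitrary examples, not tuned
 * recommendations):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * All parameters use permission 0444, so they can only be set at module
 * load time and thereafter appear read-only under
 * /sys/module/rcutorture/parameters/.
 */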

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
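
/*
 * Summary of the test protocol (inferred from the code below):
 * rtort_pipe_count records how many grace periods have elapsed since
 * the writer unpublished the element.  Readers bucket each element they
 * observe by its current pipe count; a nonzero total in bucket 2 or
 * beyond means a reader held a reference across more than one full
 * grace period, i.e. a grace period ended too soon.  rtort_mbtest is 1
 * only while an element is live, so a reader that sees 0 has caught a
 * use-after-free (counted in n_rcu_torture_mberror).
 */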

static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;
static int stutter_pause_test = 0;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
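
/*
 * The low-order bits of a power-of-two-modulus LCG are its weakest,
 * and callers reduce the result with "%".  swahw32() therefore swaps
 * the 16-bit halfwords of the 32-bit state so that callers sample the
 * stronger high-order bits.  The periodic reseed from cpu_clock()
 * keeps the per-thread streams from marching in lockstep.
 */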

static void
rcu_stutter_wait(void)
{
	while (stutter_pause_test)
		schedule_timeout_interruptible(1);
}

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);
	void (*cb_barrier)(void);
	int (*stats)(char *page);
	char *name;
};
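
/*
 * Each flavor of RCU under test supplies one of these vectors.
 * readlock() returns a token (meaningful only for SRCU) that must be
 * passed back to readunlock(); completed() samples the flavor's
 * grace-period counter; deferredfree() is the flavor's call_rcu()
 * analog; sync() is its synchronous grace-period wait.  The "_sync"
 * variants below reuse the same read-side ops but route frees through
 * sync() instead of callbacks, which is why their cb_barrier is NULL.
 */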

static struct rcu_torture_ops *cur_ops = NULL;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}
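
/*
 * Note that the callback re-queues the element (via deferredfree())
 * until it has aged through RCU_TORTURE_PIPE_LEN grace periods, so each
 * element's actual free is separated from its unpublication by a full
 * pipeline of grace periods, giving a misbehaving RCU implementation
 * many chances to trip the pipe-count check rather than just one.
 */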

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = rcu_barrier,
	.stats = NULL,
	.name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "rcu_sync"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}
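
/*
 * rcu_bh_torture_synchronize() open-codes a synchronous grace-period
 * wait from call_rcu_bh() plus a completion, presumably because no
 * synchronize_rcu_bh() primitive was available when this was written.
 * The on-stack structure is safe: wait_for_completion() keeps the
 * frame alive until the callback has run.
 */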

static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = rcu_barrier_bh,
	.stats = NULL,
	.name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
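
/*
 * SRCU maintains two ranks of per-CPU reader counters and flips
 * between them as grace periods complete; srcu_ctl.completed & 0x1
 * names the rank currently accepting new readers.  The stats line
 * prints each CPU's (previous-rank, current-rank) counts, which makes
 * readers stranded on the old rank easy to spot.
 */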

static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.cb_barrier = NULL,
	.stats = srcu_torture_stats,
	.name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static int sched_torture_completed(void)
{
	return 0;
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sched_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = rcu_barrier_sched,
	.stats = NULL,
	.name = "sched"
};

static struct rcu_torture_ops sched_ops_sync = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.cb_barrier = NULL,
	.stats = NULL,
	.name = "sched_sync"
};

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
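
/*
 * Ordering note: rcu_assign_pointer() already orders initialization of
 * *rp before publication, so the smp_wmb() following it appears
 * redundant; it is kept as belt-and-braces rather than being relied on
 * for correctness.
 */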

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
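
/*
 * Notes on the reader loop: a pipe_count greater than 1 means the
 * element was observed more than one grace period after its removal,
 * i.e. a grace period completed while this read-side critical section
 * still held a reference.  The "batch" histogram separately records
 * how far completed() advanced across the critical section.  The
 * preempt_disable()/preempt_enable() pair simply keeps
 * __get_cpu_var() on a stable CPU for the per-CPU increments.
 */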

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
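
/*
 * Decoding the stats line: "rtc" is the current element's address,
 * "ver" the count of writer substitutions, "tfle" whether the freelist
 * is empty, "rta"/"rtaf"/"rtf" the allocation, allocation-failure, and
 * free counts, and "rtmbe" the memory-barrier (use-after-free) error
 * count.  After the backwards scan above, i indexes the highest
 * nonzero Reader Pipe bucket, so i > 1 means some reader saw an
 * element at least two grace periods after removal; the "!!!" marker
 * is printed and n_rcu_torture_error is incremented.
 */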

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle.  A special
 * case is when @rcu_idle_cpu = -1, in which case we allow the tasks to run
 * on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask;
	int i;

	cpus_setall(tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed_ptr(current, &tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     &tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     &tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, &tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, &tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and cut off its timer tick.  This is meant
 * to test RCU's support for tickless idle CPUs.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval=%d stutter=%d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		stutter);
}

static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &sched_ops, &sched_ops_sync, };

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return -EINVAL;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;
		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);