rcutorture.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999
  1. /*
  2. * Read-Copy Update module-based torture test facility
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) IBM Corporation, 2005, 2006
  19. *
  20. * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  21. * Josh Triplett <josh@freedesktop.org>
  22. *
  23. * See also: Documentation/RCU/torture.txt
  24. */
  25. #include <linux/types.h>
  26. #include <linux/kernel.h>
  27. #include <linux/init.h>
  28. #include <linux/module.h>
  29. #include <linux/kthread.h>
  30. #include <linux/err.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/smp.h>
  33. #include <linux/rcupdate.h>
  34. #include <linux/interrupt.h>
  35. #include <linux/sched.h>
  36. #include <asm/atomic.h>
  37. #include <linux/bitops.h>
  38. #include <linux/completion.h>
  39. #include <linux/moduleparam.h>
  40. #include <linux/percpu.h>
  41. #include <linux/notifier.h>
  42. #include <linux/freezer.h>
  43. #include <linux/cpu.h>
  44. #include <linux/delay.h>
  45. #include <linux/byteorder/swabb.h>
  46. #include <linux/stat.h>
  47. #include <linux/srcu.h>
  48. #include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

/* Module parameters; all read-only (0444) once the module is loaded. */
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
#define TORTURE_FLAG "-torture:"
/* Tagged printk helpers so all output is greppable by torture type. */
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

/* Scratch buffer for stats output; single writer only (see stats thread). */
static char printk_buf[4096];

static int nrealreaders;	/* actual reader count after defaulting */
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

/* One test element cycled through the grace-period "pipeline". */
struct rcu_torture {
	struct rcu_head rtort_rcu;	/* for call_rcu()-style deferral */
	int rtort_pipe_count;		/* # grace periods since removal */
	struct list_head rtort_free;	/* linkage on the freelist */
	int rtort_mbtest;		/* nonzero while visible to readers */
};

static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;	/* readers deref this */
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);	/* guards the freelist */
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;	/* readers saw rtort_mbtest == 0 */
static atomic_t n_rcu_torture_error;	/* stats detected a pipeline anomaly */
static struct list_head rcu_torture_removed;
/*
 * Allocate an element from the rcu_tortures pool.
 * Returns NULL (and counts the failure) when the freelist is empty.
 * Uses the _bh lock variant because rcu_torture_free() can run from
 * softirq context via the RCU callback.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.  Called from RCU callbacks
 * (softirq context), hence the _bh lock variant.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/* Per-caller PRNG state; no locking, each kthread owns its own instance. */
struct rcu_random_state {
	unsigned long rrs_state;	/* LCG state word */
	long rrs_count;			/* draws left before reseeding */
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000  /* reseed from cpu_clock() this often */

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().  swahw32() mixes
 * the halfwords so the low-order bits are not the weakest LCG bits.
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state +=
			(unsigned long)cpu_clock(raw_smp_processor_id());
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
/*
 * Operations vector for selecting different types of tests.
 * Each RCU flavor under test provides one of these; NULL hooks are
 * simply skipped by the callers.
 */
struct rcu_torture_ops {
	void (*init)(void);		/* per-flavor setup, or NULL */
	void (*cleanup)(void);		/* per-flavor teardown, or NULL */
	int (*readlock)(void);		/* returns an index cookie (srcu) */
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);	/* idx from the matching readlock */
	int (*completed)(void);		/* grace-period counter snapshot */
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);		/* synchronous grace-period wait */
	int (*stats)(char *page);	/* extra stats, or NULL */
	char *name;			/* matches the torture_type param */
};

/* Flavor currently under test; selected once in rcu_torture_init(). */
static struct rcu_torture_ops *cur_ops = NULL;
/*
 * Definitions for rcu torture testing.
 */

/* Enter an RCU read-side critical section; "rcu" needs no index cookie. */
static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

/*
 * Read-side delay: usually returns immediately, but roughly once per
 * nrealreaders * 2 * longdelay draws spins for longdelay microseconds so
 * that some readers span a full grace period.
 */
static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/* Grace-period counter snapshot for the "rcu" flavor. */
static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

/*
 * Callback invoked after a grace period for an element the writer has
 * replaced.  Counts the element's pipeline stage, then either returns it
 * to the freelist (pipeline exhausted) or requeues it for yet another
 * grace period via the flavor's deferredfree hook.
 */
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;	/* no longer a valid reader target */
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}
/* Queue an element for freeing after an "rcu"-flavor grace period. */
static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

/* Operations vector for classic call_rcu()-based testing. */
static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu"
};
/*
 * "Deferred" free for synchronous-grace-period flavors: wait for a grace
 * period up front, park the element on rcu_torture_removed, and sweep
 * that list for elements whose pipeline count has run out.  Called only
 * from the single writer kthread, so the list needs no locking.
 */
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

/* Shared init for all synchronous flavors: reset the removed list. */
static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

/* Operations vector exercising synchronize_rcu() instead of call_rcu(). */
static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu_sync"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

/* Queue an element for freeing after an rcu_bh grace period. */
static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

/*
 * On-stack state for emulating synchronize_rcu_bh(): the callback wakes
 * the waiter via the completion.
 */
struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

/* Block until an rcu_bh grace period has elapsed. */
static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh"
};

/* rcu_bh with synchronous (writer-side-wait) element recycling. */
static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh_sync"
};
/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

/* Drain outstanding SRCU readers before tearing the structure down. */
static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

/* SRCU read lock returns an index that must be handed back on unlock. */
static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

/*
 * SRCU readers may sleep, so the occasional long delay is a sleeping
 * schedule_timeout rather than a udelay spin.
 */
static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

/*
 * Append per-CPU SRCU reader counts for both counter banks to the stats
 * page; returns the number of bytes written.
 */
static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;	/* currently-active bank */

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.stats = srcu_torture_stats,
	.name = "srcu"
};
/*
 * Definitions for sched torture testing: read-side critical sections
 * are preemption-disabled regions, grace periods via synchronize_sched().
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

/* No per-flavor grace-period counter available; always report 0. */
static int sched_torture_completed(void)
{
	return 0;
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.stats = NULL,
	.name = "sched"
};
  437. /*
  438. * RCU torture writer kthread. Repeatedly substitutes a new structure
  439. * for that pointed to by rcu_torture_current, freeing the old structure
  440. * after a series of grace periods (the "pipeline").
  441. */
  442. static int
  443. rcu_torture_writer(void *arg)
  444. {
  445. int i;
  446. long oldbatch = rcu_batches_completed();
  447. struct rcu_torture *rp;
  448. struct rcu_torture *old_rp;
  449. static DEFINE_RCU_RANDOM(rand);
  450. VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
  451. set_user_nice(current, 19);
  452. do {
  453. schedule_timeout_uninterruptible(1);
  454. if ((rp = rcu_torture_alloc()) == NULL)
  455. continue;
  456. rp->rtort_pipe_count = 0;
  457. udelay(rcu_random(&rand) & 0x3ff);
  458. old_rp = rcu_torture_current;
  459. rp->rtort_mbtest = 1;
  460. rcu_assign_pointer(rcu_torture_current, rp);
  461. smp_wmb();
  462. if (old_rp) {
  463. i = old_rp->rtort_pipe_count;
  464. if (i > RCU_TORTURE_PIPE_LEN)
  465. i = RCU_TORTURE_PIPE_LEN;
  466. atomic_inc(&rcu_torture_wcount[i]);
  467. old_rp->rtort_pipe_count++;
  468. cur_ops->deferredfree(old_rp);
  469. }
  470. rcu_torture_current_version++;
  471. oldbatch = cur_ops->completed();
  472. } while (!kthread_should_stop() && !fullstop);
  473. VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
  474. while (!kthread_should_stop())
  475. schedule_timeout_uninterruptible(1);
  476. return 0;
  477. }
  478. /*
  479. * RCU torture fake writer kthread. Repeatedly calls sync, with a random
  480. * delay between calls.
  481. */
  482. static int
  483. rcu_torture_fakewriter(void *arg)
  484. {
  485. DEFINE_RCU_RANDOM(rand);
  486. VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
  487. set_user_nice(current, 19);
  488. do {
  489. schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
  490. udelay(rcu_random(&rand) & 0x3ff);
  491. cur_ops->sync();
  492. } while (!kthread_should_stop() && !fullstop);
  493. VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
  494. while (!kthread_should_stop())
  495. schedule_timeout_uninterruptible(1);
  496. return 0;
  497. }
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		/* mbtest==0 means we saw a freed/uninitialized element. */
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
		/* Disable preemption so the per-CPU counters stay ours. */
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	/* Idle until kthread_stop() reaps us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * Create an RCU-torture statistics message in the specified buffer.
 * Returns the number of bytes written.  Caller must provide a buffer
 * large enough for the per-CPU output (see printk_buf).
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	/* Sum the per-CPU pipeline and batch histograms. */
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	/* Find the highest nonempty pipeline stage (i == -1 if all empty). */
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	/* Readers in stage >1 mean an element outlived its grace periods. */
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).  (The shared printk_buf is what makes
 * concurrent calls unsafe.)
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}
  621. /*
  622. * Periodically prints torture statistics, if periodic statistics printing
  623. * was specified via the stat_interval module parameter.
  624. *
  625. * No need to worry about fullstop here, since this one doesn't reference
  626. * volatile state or register callbacks.
  627. */
  628. static int
  629. rcu_torture_stats(void *arg)
  630. {
  631. VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
  632. do {
  633. schedule_timeout_interruptible(stat_interval * HZ);
  634. rcu_torture_stats_print();
  635. } while (!kthread_should_stop());
  636. VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
  637. return 0;
  638. }
static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle.  A special
 * case is when @rcu_idle_cpu = -1, when we allow the tasks to run on all
 * CPUs.  Walks rcu_idle_cpu downward each call, wrapping via -1.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask;
	int i;

	cpus_setall(tmp_mask);
	get_online_cpus();	/* hold off CPU hotplug while rebinding */

	/* No point in shuffling if there is only one online CPU (ex: UP) */

	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed_ptr(current, &tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     &tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     &tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, &tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, &tmp_mask);

	/* Pick the next CPU to be idled, cycling from highest to -1. */
	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
  679. /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
  680. * system to become idle at a time and cut off its timer ticks. This is meant
  681. * to test the support for such tickless idle CPU in RCU.
  682. */
  683. static int
  684. rcu_torture_shuffle(void *arg)
  685. {
  686. VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
  687. do {
  688. schedule_timeout_interruptible(shuffle_interval * HZ);
  689. rcu_torture_shuffle_tasks();
  690. } while (!kthread_should_stop());
  691. VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
  692. return 0;
  693. }
/* Log the effective module parameters, prefixed by @tag ("Start of test",
 * "End of test: SUCCESS"/"FAILURE").
 */
static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
	       "--- %s: nreaders=%d nfakewriters=%d "
	       "stat_interval=%d verbose=%d test_no_idle_hz=%d "
	       "shuffle_interval = %d\n",
	       torture_type, tag, nrealreaders, nfakewriters,
	       stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}
/*
 * Tear down the test: set fullstop so callbacks self-cancel, stop every
 * kthread, wait for in-flight RCU callbacks, print final stats, and run
 * the flavor's cleanup hook.  Order matters: the writer must stop before
 * rcu_barrier(), and the stats thread before the final stats print
 * (shared printk_buf).
 */
static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;	/* tell kthreads and callbacks to wind down */
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	rcu_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
/*
 * Module init: select the flavor named by torture_type, reset all
 * statistics and the element freelist, then start the writer, fake
 * writers, readers, and (optionally) the stats and shuffler kthreads.
 * Any failure unwinds through rcu_torture_cleanup().  Returns 0 on
 * success or a negative errno.
 */
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &sched_ops, };

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return (-EINVAL);
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		/* Create the shuffler thread */

		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();	/* safe: stops only what was started */
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);