rcutorture.c
/*
 * Read-Copy Update /proc-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *
 * See also: Documentation/RCU/torture.txt
 */
  24. #include <linux/types.h>
  25. #include <linux/kernel.h>
  26. #include <linux/init.h>
  27. #include <linux/module.h>
  28. #include <linux/kthread.h>
  29. #include <linux/err.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/smp.h>
  32. #include <linux/rcupdate.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/sched.h>
  35. #include <asm/atomic.h>
  36. #include <linux/bitops.h>
  37. #include <linux/module.h>
  38. #include <linux/completion.h>
  39. #include <linux/moduleparam.h>
  40. #include <linux/percpu.h>
  41. #include <linux/notifier.h>
  42. #include <linux/cpu.h>
  43. #include <linux/random.h>
  44. #include <linux/delay.h>
  45. #include <linux/byteorder/swabb.h>
  46. #include <linux/stat.h>
  47. MODULE_LICENSE("GPL");
  48. static int nreaders = -1; /* # reader threads, defaults to 4*ncpus */
  49. static int stat_interval; /* Interval between stats, in seconds. */
  50. /* Defaults to "only at end of test". */
  51. static int verbose; /* Print more debug info. */
  52. static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
  53. static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
  54. MODULE_PARM(nreaders, "i");
  55. MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
  56. MODULE_PARM(stat_interval, "i");
  57. MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
  58. MODULE_PARM(verbose, "i");
  59. MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
  60. MODULE_PARM(test_no_idle_hz, "i");
  61. MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
  62. MODULE_PARM(shuffle_interval, "i");
  63. MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
  64. #define TORTURE_FLAG "rcutorture: "
  65. #define PRINTK_STRING(s) \
  66. do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
  67. #define VERBOSE_PRINTK_STRING(s) \
  68. do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
  69. #define VERBOSE_PRINTK_ERRSTRING(s) \
  70. do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0)
  71. static char printk_buf[4096];
  72. static int nrealreaders;
  73. static struct task_struct *writer_task;
  74. static struct task_struct **reader_tasks;
  75. static struct task_struct *stats_task;
  76. static struct task_struct *shuffler_task;
  77. #define RCU_TORTURE_PIPE_LEN 10
  78. struct rcu_torture {
  79. struct rcu_head rtort_rcu;
  80. int rtort_pipe_count;
  81. struct list_head rtort_free;
  82. int rtort_mbtest;
  83. };
  84. static int fullstop = 0; /* stop generating callbacks at test end. */
  85. static LIST_HEAD(rcu_torture_freelist);
  86. static struct rcu_torture *rcu_torture_current = NULL;
  87. static long rcu_torture_current_version = 0;
  88. static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
  89. static DEFINE_SPINLOCK(rcu_torture_lock);
  90. static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
  91. { 0 };
  92. static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
  93. { 0 };
  94. static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
  95. atomic_t n_rcu_torture_alloc;
  96. atomic_t n_rcu_torture_alloc_fail;
  97. atomic_t n_rcu_torture_free;
  98. atomic_t n_rcu_torture_mberror;
  99. atomic_t n_rcu_torture_error;
  100. /*
  101. * Allocate an element from the rcu_tortures pool.
  102. */
  103. static struct rcu_torture *
  104. rcu_torture_alloc(void)
  105. {
  106. struct list_head *p;
  107. spin_lock(&rcu_torture_lock);
  108. if (list_empty(&rcu_torture_freelist)) {
  109. atomic_inc(&n_rcu_torture_alloc_fail);
  110. spin_unlock(&rcu_torture_lock);
  111. return NULL;
  112. }
  113. atomic_inc(&n_rcu_torture_alloc);
  114. p = rcu_torture_freelist.next;
  115. list_del_init(p);
  116. spin_unlock(&rcu_torture_lock);
  117. return container_of(p, struct rcu_torture, rtort_free);
  118. }
  119. /*
  120. * Free an element to the rcu_tortures pool.
  121. */
  122. static void
  123. rcu_torture_free(struct rcu_torture *p)
  124. {
  125. atomic_inc(&n_rcu_torture_free);
  126. spin_lock(&rcu_torture_lock);
  127. list_add_tail(&p->rtort_free, &rcu_torture_freelist);
  128. spin_unlock(&rcu_torture_lock);
  129. }
  130. static void
  131. rcu_torture_cb(struct rcu_head *p)
  132. {
  133. int i;
  134. struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
  135. if (fullstop) {
  136. /* Test is ending, just drop callbacks on the floor. */
  137. /* The next initialization will pick up the pieces. */
  138. return;
  139. }
  140. i = rp->rtort_pipe_count;
  141. if (i > RCU_TORTURE_PIPE_LEN)
  142. i = RCU_TORTURE_PIPE_LEN;
  143. atomic_inc(&rcu_torture_wcount[i]);
  144. if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
  145. rp->rtort_mbtest = 0;
  146. rcu_torture_free(rp);
  147. } else
  148. call_rcu(p, rcu_torture_cb);
  149. }
  150. struct rcu_random_state {
  151. unsigned long rrs_state;
  152. unsigned long rrs_count;
  153. };
  154. #define RCU_RANDOM_MULT 39916801 /* prime */
  155. #define RCU_RANDOM_ADD 479001701 /* prime */
  156. #define RCU_RANDOM_REFRESH 10000
  157. #define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
  158. /*
  159. * Crude but fast random-number generator. Uses a linear congruential
  160. * generator, with occasional help from get_random_bytes().
  161. */
  162. static long
  163. rcu_random(struct rcu_random_state *rrsp)
  164. {
  165. long refresh;
  166. if (--rrsp->rrs_count < 0) {
  167. get_random_bytes(&refresh, sizeof(refresh));
  168. rrsp->rrs_state += refresh;
  169. rrsp->rrs_count = RCU_RANDOM_REFRESH;
  170. }
  171. rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
  172. return swahw32(rrsp->rrs_state);
  173. }
  174. /*
  175. * RCU torture writer kthread. Repeatedly substitutes a new structure
  176. * for that pointed to by rcu_torture_current, freeing the old structure
  177. * after a series of grace periods (the "pipeline").
  178. */
  179. static int
  180. rcu_torture_writer(void *arg)
  181. {
  182. int i;
  183. long oldbatch = rcu_batches_completed();
  184. struct rcu_torture *rp;
  185. struct rcu_torture *old_rp;
  186. static DEFINE_RCU_RANDOM(rand);
  187. VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
  188. set_user_nice(current, 19);
  189. do {
  190. schedule_timeout_uninterruptible(1);
  191. if (rcu_batches_completed() == oldbatch)
  192. continue;
  193. if ((rp = rcu_torture_alloc()) == NULL)
  194. continue;
  195. rp->rtort_pipe_count = 0;
  196. udelay(rcu_random(&rand) & 0x3ff);
  197. old_rp = rcu_torture_current;
  198. rp->rtort_mbtest = 1;
  199. rcu_assign_pointer(rcu_torture_current, rp);
  200. smp_wmb();
  201. if (old_rp != NULL) {
  202. i = old_rp->rtort_pipe_count;
  203. if (i > RCU_TORTURE_PIPE_LEN)
  204. i = RCU_TORTURE_PIPE_LEN;
  205. atomic_inc(&rcu_torture_wcount[i]);
  206. old_rp->rtort_pipe_count++;
  207. call_rcu(&old_rp->rtort_rcu, rcu_torture_cb);
  208. }
  209. rcu_torture_current_version++;
  210. oldbatch = rcu_batches_completed();
  211. } while (!kthread_should_stop() && !fullstop);
  212. VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
  213. while (!kthread_should_stop())
  214. schedule_timeout_uninterruptible(1);
  215. return 0;
  216. }
  217. /*
  218. * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
  219. * incrementing the corresponding element of the pipeline array. The
  220. * counter in the element should never be greater than 1, otherwise, the
  221. * RCU implementation is broken.
  222. */
  223. static int
  224. rcu_torture_reader(void *arg)
  225. {
  226. int completed;
  227. DEFINE_RCU_RANDOM(rand);
  228. struct rcu_torture *p;
  229. int pipe_count;
  230. VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
  231. set_user_nice(current, 19);
  232. do {
  233. rcu_read_lock();
  234. completed = rcu_batches_completed();
  235. p = rcu_dereference(rcu_torture_current);
  236. if (p == NULL) {
  237. /* Wait for rcu_torture_writer to get underway */
  238. rcu_read_unlock();
  239. schedule_timeout_interruptible(HZ);
  240. continue;
  241. }
  242. if (p->rtort_mbtest == 0)
  243. atomic_inc(&n_rcu_torture_mberror);
  244. udelay(rcu_random(&rand) & 0x7f);
  245. preempt_disable();
  246. pipe_count = p->rtort_pipe_count;
  247. if (pipe_count > RCU_TORTURE_PIPE_LEN) {
  248. /* Should not happen, but... */
  249. pipe_count = RCU_TORTURE_PIPE_LEN;
  250. }
  251. ++__get_cpu_var(rcu_torture_count)[pipe_count];
  252. completed = rcu_batches_completed() - completed;
  253. if (completed > RCU_TORTURE_PIPE_LEN) {
  254. /* Should not happen, but... */
  255. completed = RCU_TORTURE_PIPE_LEN;
  256. }
  257. ++__get_cpu_var(rcu_torture_batch)[completed];
  258. preempt_enable();
  259. rcu_read_unlock();
  260. schedule();
  261. } while (!kthread_should_stop() && !fullstop);
  262. VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
  263. while (!kthread_should_stop())
  264. schedule_timeout_uninterruptible(1);
  265. return 0;
  266. }
  267. /*
  268. * Create an RCU-torture statistics message in the specified buffer.
  269. */
  270. static int
  271. rcu_torture_printk(char *page)
  272. {
  273. int cnt = 0;
  274. int cpu;
  275. int i;
  276. long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  277. long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  278. for_each_cpu(cpu) {
  279. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  280. pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
  281. batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
  282. }
  283. }
  284. for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
  285. if (pipesummary[i] != 0)
  286. break;
  287. }
  288. cnt += sprintf(&page[cnt], "rcutorture: ");
  289. cnt += sprintf(&page[cnt],
  290. "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
  291. "rtmbe: %d",
  292. rcu_torture_current,
  293. rcu_torture_current_version,
  294. list_empty(&rcu_torture_freelist),
  295. atomic_read(&n_rcu_torture_alloc),
  296. atomic_read(&n_rcu_torture_alloc_fail),
  297. atomic_read(&n_rcu_torture_free),
  298. atomic_read(&n_rcu_torture_mberror));
  299. if (atomic_read(&n_rcu_torture_mberror) != 0)
  300. cnt += sprintf(&page[cnt], " !!!");
  301. cnt += sprintf(&page[cnt], "\nrcutorture: ");
  302. if (i > 1) {
  303. cnt += sprintf(&page[cnt], "!!! ");
  304. atomic_inc(&n_rcu_torture_error);
  305. }
  306. cnt += sprintf(&page[cnt], "Reader Pipe: ");
  307. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  308. cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
  309. cnt += sprintf(&page[cnt], "\nrcutorture: ");
  310. cnt += sprintf(&page[cnt], "Reader Batch: ");
  311. for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
  312. cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
  313. cnt += sprintf(&page[cnt], "\nrcutorture: ");
  314. cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
  315. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  316. cnt += sprintf(&page[cnt], " %d",
  317. atomic_read(&rcu_torture_wcount[i]));
  318. }
  319. cnt += sprintf(&page[cnt], "\n");
  320. return cnt;
  321. }
  322. /*
  323. * Print torture statistics. Caller must ensure that there is only
  324. * one call to this function at a given time!!! This is normally
  325. * accomplished by relying on the module system to only have one copy
  326. * of the module loaded, and then by giving the rcu_torture_stats
  327. * kthread full control (or the init/cleanup functions when rcu_torture_stats
  328. * thread is not running).
  329. */
  330. static void
  331. rcu_torture_stats_print(void)
  332. {
  333. int cnt;
  334. cnt = rcu_torture_printk(printk_buf);
  335. printk(KERN_ALERT "%s", printk_buf);
  336. }
  337. /*
  338. * Periodically prints torture statistics, if periodic statistics printing
  339. * was specified via the stat_interval module parameter.
  340. *
  341. * No need to worry about fullstop here, since this one doesn't reference
  342. * volatile state or register callbacks.
  343. */
  344. static int
  345. rcu_torture_stats(void *arg)
  346. {
  347. VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
  348. do {
  349. schedule_timeout_interruptible(stat_interval * HZ);
  350. rcu_torture_stats_print();
  351. } while (!kthread_should_stop());
  352. VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
  353. return 0;
  354. }
  355. static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
  356. /* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
  357. * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
  358. */
  359. void rcu_torture_shuffle_tasks(void)
  360. {
  361. cpumask_t tmp_mask = CPU_MASK_ALL;
  362. int i;
  363. lock_cpu_hotplug();
  364. /* No point in shuffling if there is only one online CPU (ex: UP) */
  365. if (num_online_cpus() == 1) {
  366. unlock_cpu_hotplug();
  367. return;
  368. }
  369. if (rcu_idle_cpu != -1)
  370. cpu_clear(rcu_idle_cpu, tmp_mask);
  371. set_cpus_allowed(current, tmp_mask);
  372. if (reader_tasks != NULL) {
  373. for (i = 0; i < nrealreaders; i++)
  374. if (reader_tasks[i])
  375. set_cpus_allowed(reader_tasks[i], tmp_mask);
  376. }
  377. if (writer_task)
  378. set_cpus_allowed(writer_task, tmp_mask);
  379. if (stats_task)
  380. set_cpus_allowed(stats_task, tmp_mask);
  381. if (rcu_idle_cpu == -1)
  382. rcu_idle_cpu = num_online_cpus() - 1;
  383. else
  384. rcu_idle_cpu--;
  385. unlock_cpu_hotplug();
  386. }
  387. /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
  388. * system to become idle at a time and cut off its timer ticks. This is meant
  389. * to test the support for such tickless idle CPU in RCU.
  390. */
  391. static int
  392. rcu_torture_shuffle(void *arg)
  393. {
  394. VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
  395. do {
  396. schedule_timeout_interruptible(shuffle_interval * HZ);
  397. rcu_torture_shuffle_tasks();
  398. } while (!kthread_should_stop());
  399. VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
  400. return 0;
  401. }
  402. static void
  403. rcu_torture_cleanup(void)
  404. {
  405. int i;
  406. fullstop = 1;
  407. if (shuffler_task != NULL) {
  408. VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
  409. kthread_stop(shuffler_task);
  410. }
  411. shuffler_task = NULL;
  412. if (writer_task != NULL) {
  413. VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
  414. kthread_stop(writer_task);
  415. }
  416. writer_task = NULL;
  417. if (reader_tasks != NULL) {
  418. for (i = 0; i < nrealreaders; i++) {
  419. if (reader_tasks[i] != NULL) {
  420. VERBOSE_PRINTK_STRING(
  421. "Stopping rcu_torture_reader task");
  422. kthread_stop(reader_tasks[i]);
  423. }
  424. reader_tasks[i] = NULL;
  425. }
  426. kfree(reader_tasks);
  427. reader_tasks = NULL;
  428. }
  429. rcu_torture_current = NULL;
  430. if (stats_task != NULL) {
  431. VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
  432. kthread_stop(stats_task);
  433. }
  434. stats_task = NULL;
  435. /* Wait for all RCU callbacks to fire. */
  436. rcu_barrier();
  437. rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
  438. printk(KERN_ALERT TORTURE_FLAG
  439. "--- End of test: %s\n",
  440. atomic_read(&n_rcu_torture_error) == 0 ? "SUCCESS" : "FAILURE");
  441. }
  442. static int
  443. rcu_torture_init(void)
  444. {
  445. int i;
  446. int cpu;
  447. int firsterr = 0;
  448. /* Process args and tell the world that the torturer is on the job. */
  449. if (nreaders >= 0)
  450. nrealreaders = nreaders;
  451. else
  452. nrealreaders = 2 * num_online_cpus();
  453. printk(KERN_ALERT TORTURE_FLAG "--- Start of test: nreaders=%d "
  454. "stat_interval=%d verbose=%d test_no_idle_hz=%d "
  455. "shuffle_interval = %d\n",
  456. nrealreaders, stat_interval, verbose, test_no_idle_hz,
  457. shuffle_interval);
  458. fullstop = 0;
  459. /* Set up the freelist. */
  460. INIT_LIST_HEAD(&rcu_torture_freelist);
  461. for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
  462. rcu_tortures[i].rtort_mbtest = 0;
  463. list_add_tail(&rcu_tortures[i].rtort_free,
  464. &rcu_torture_freelist);
  465. }
  466. /* Initialize the statistics so that each run gets its own numbers. */
  467. rcu_torture_current = NULL;
  468. rcu_torture_current_version = 0;
  469. atomic_set(&n_rcu_torture_alloc, 0);
  470. atomic_set(&n_rcu_torture_alloc_fail, 0);
  471. atomic_set(&n_rcu_torture_free, 0);
  472. atomic_set(&n_rcu_torture_mberror, 0);
  473. atomic_set(&n_rcu_torture_error, 0);
  474. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  475. atomic_set(&rcu_torture_wcount[i], 0);
  476. for_each_cpu(cpu) {
  477. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  478. per_cpu(rcu_torture_count, cpu)[i] = 0;
  479. per_cpu(rcu_torture_batch, cpu)[i] = 0;
  480. }
  481. }
  482. /* Start up the kthreads. */
  483. VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
  484. writer_task = kthread_run(rcu_torture_writer, NULL,
  485. "rcu_torture_writer");
  486. if (IS_ERR(writer_task)) {
  487. firsterr = PTR_ERR(writer_task);
  488. VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
  489. writer_task = NULL;
  490. goto unwind;
  491. }
  492. reader_tasks = kmalloc(nrealreaders * sizeof(reader_tasks[0]),
  493. GFP_KERNEL);
  494. if (reader_tasks == NULL) {
  495. VERBOSE_PRINTK_ERRSTRING("out of memory");
  496. firsterr = -ENOMEM;
  497. goto unwind;
  498. }
  499. for (i = 0; i < nrealreaders; i++) {
  500. VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
  501. reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
  502. "rcu_torture_reader");
  503. if (IS_ERR(reader_tasks[i])) {
  504. firsterr = PTR_ERR(reader_tasks[i]);
  505. VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
  506. reader_tasks[i] = NULL;
  507. goto unwind;
  508. }
  509. }
  510. if (stat_interval > 0) {
  511. VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
  512. stats_task = kthread_run(rcu_torture_stats, NULL,
  513. "rcu_torture_stats");
  514. if (IS_ERR(stats_task)) {
  515. firsterr = PTR_ERR(stats_task);
  516. VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
  517. stats_task = NULL;
  518. goto unwind;
  519. }
  520. }
  521. if (test_no_idle_hz) {
  522. rcu_idle_cpu = num_online_cpus() - 1;
  523. /* Create the shuffler thread */
  524. shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
  525. "rcu_torture_shuffle");
  526. if (IS_ERR(shuffler_task)) {
  527. firsterr = PTR_ERR(shuffler_task);
  528. VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
  529. shuffler_task = NULL;
  530. goto unwind;
  531. }
  532. }
  533. return 0;
  534. unwind:
  535. rcu_torture_cleanup();
  536. return firsterr;
  537. }
  538. module_init(rcu_torture_init);
  539. module_exit(rcu_torture_cleanup);