/* kernel/rcutorture.c */
  1. /*
  2. * Read-Copy Update /proc-based torture test facility
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17. *
  18. * Copyright (C) IBM Corporation, 2005
  19. *
  20. * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  21. *
  22. * See also: Documentation/RCU/torture.txt
  23. */
  24. #include <linux/types.h>
  25. #include <linux/kernel.h>
  26. #include <linux/init.h>
  27. #include <linux/module.h>
  28. #include <linux/kthread.h>
  29. #include <linux/err.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/smp.h>
  32. #include <linux/rcupdate.h>
  33. #include <linux/interrupt.h>
  34. #include <linux/sched.h>
  35. #include <asm/atomic.h>
  36. #include <linux/bitops.h>
  37. #include <linux/module.h>
  38. #include <linux/completion.h>
  39. #include <linux/moduleparam.h>
  40. #include <linux/percpu.h>
  41. #include <linux/notifier.h>
  42. #include <linux/cpu.h>
  43. #include <linux/random.h>
  44. #include <linux/delay.h>
  45. #include <linux/byteorder/swabb.h>
  46. #include <linux/stat.h>
MODULE_LICENSE("GPL");

/*
 * Module parameters.  All use permission 0, so they are settable only at
 * module-load time and are not visible under /sys/module.
 */
static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus (see rcu_torture_init) */
static int stat_interval;	/* Interval between stats, in seconds. */
				/* Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
module_param(nreaders, int, 0);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(stat_interval, int, 0);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
#define TORTURE_FLAG "rcutorture: "
/* Unconditional tagged printk. */
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
/* Tagged printk emitted only when the "verbose" module parameter is set. */
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
/* Like VERBOSE_PRINTK_STRING, but flagged "!!!" to mark an error path. */
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0)

/* Scratch buffer for stats output; only the single stats context uses it. */
static char printk_buf[4096];

static int nrealreaders;		/* actual reader-thread count in use */
static struct task_struct *writer_task;
static struct task_struct **reader_tasks;	/* array of nrealreaders entries */
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
#define RCU_TORTURE_PIPE_LEN 10

/* One element cycled between the writer, RCU grace periods, and the freelist. */
struct rcu_torture {
	struct rcu_head rtort_rcu;	/* for call_rcu() reposting */
	int rtort_pipe_count;		/* # grace periods since last writer update */
	struct list_head rtort_free;	/* linkage on rcu_torture_freelist */
	int rtort_mbtest;		/* 1 while live; 0 once freed (memory-barrier check) */
};

static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;	/* RCU-protected; readers use rcu_dereference() */
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);	/* guards rcu_torture_freelist */
/* Per-CPU histograms updated by readers with preemption disabled. */
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
atomic_t n_rcu_torture_alloc;		/* successful freelist allocations */
atomic_t n_rcu_torture_alloc_fail;	/* freelist-empty allocation failures */
atomic_t n_rcu_torture_free;		/* elements returned to the freelist */
atomic_t n_rcu_torture_mberror;		/* readers that saw rtort_mbtest == 0 */
atomic_t n_rcu_torture_error;		/* stats-detected pipeline errors */
  100. /*
  101. * Allocate an element from the rcu_tortures pool.
  102. */
  103. static struct rcu_torture *
  104. rcu_torture_alloc(void)
  105. {
  106. struct list_head *p;
  107. spin_lock_bh(&rcu_torture_lock);
  108. if (list_empty(&rcu_torture_freelist)) {
  109. atomic_inc(&n_rcu_torture_alloc_fail);
  110. spin_unlock_bh(&rcu_torture_lock);
  111. return NULL;
  112. }
  113. atomic_inc(&n_rcu_torture_alloc);
  114. p = rcu_torture_freelist.next;
  115. list_del_init(p);
  116. spin_unlock_bh(&rcu_torture_lock);
  117. return container_of(p, struct rcu_torture, rtort_free);
  118. }
  119. /*
  120. * Free an element to the rcu_tortures pool.
  121. */
  122. static void
  123. rcu_torture_free(struct rcu_torture *p)
  124. {
  125. atomic_inc(&n_rcu_torture_free);
  126. spin_lock_bh(&rcu_torture_lock);
  127. list_add_tail(&p->rtort_free, &rcu_torture_freelist);
  128. spin_unlock_bh(&rcu_torture_lock);
  129. }
  130. static void
  131. rcu_torture_cb(struct rcu_head *p)
  132. {
  133. int i;
  134. struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
  135. if (fullstop) {
  136. /* Test is ending, just drop callbacks on the floor. */
  137. /* The next initialization will pick up the pieces. */
  138. return;
  139. }
  140. i = rp->rtort_pipe_count;
  141. if (i > RCU_TORTURE_PIPE_LEN)
  142. i = RCU_TORTURE_PIPE_LEN;
  143. atomic_inc(&rcu_torture_wcount[i]);
  144. if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
  145. rp->rtort_mbtest = 0;
  146. rcu_torture_free(rp);
  147. } else
  148. call_rcu(p, rcu_torture_cb);
  149. }
/* Per-caller state for the crude linear-congruential RNG below. */
struct rcu_random_state {
	unsigned long rrs_state;	/* current LCG state */
	unsigned long rrs_count;	/* draws remaining before entropy refresh */
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD 479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000	/* draws between get_random_bytes() refreshes */
#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
  158. /*
  159. * Crude but fast random-number generator. Uses a linear congruential
  160. * generator, with occasional help from get_random_bytes().
  161. */
  162. static long
  163. rcu_random(struct rcu_random_state *rrsp)
  164. {
  165. long refresh;
  166. if (--rrsp->rrs_count < 0) {
  167. get_random_bytes(&refresh, sizeof(refresh));
  168. rrsp->rrs_state += refresh;
  169. rrsp->rrs_count = RCU_RANDOM_REFRESH;
  170. }
  171. rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
  172. return swahw32(rrsp->rrs_state);
  173. }
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);	/* lowest priority: yield to real work */
	do {
		schedule_timeout_uninterruptible(1);
		/* Only publish a new element after at least one grace period. */
		if (rcu_batches_completed() == oldbatch)
			continue;
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;	/* pool exhausted; retry next tick */
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);	/* jitter the update timing */
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;	/* must be set before publication */
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();	/* NOTE(review): looks redundant with the barrier in
				 * rcu_assign_pointer() — confirm before removing */
		if (old_rp != NULL) {
			/* Credit the old element's pipeline stage, clamped. */
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			/* Start the old element through the callback pipeline. */
			call_rcu(&old_rp->rtort_rcu, rcu_torture_cb);
		}
		rcu_torture_current_version++;
		oldbatch = rcu_batches_completed();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	/* Spin politely until kthread_stop() collects us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);	/* lowest priority: yield to real work */
	do {
		rcu_read_lock();
		completed = rcu_batches_completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			rcu_read_unlock();
			schedule_timeout_interruptible(HZ);
			continue;
		}
		/* mbtest==0 means we saw an already-freed element: mb failure. */
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		udelay(rcu_random(&rand) & 0x7f);	/* stretch the read-side window */
		preempt_disable();	/* pin CPU for the per-CPU increments below */
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		/* pipe_count > 1 here means the element aged while we held it. */
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		/* # grace periods that elapsed during this read-side section. */
		completed = rcu_batches_completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		rcu_read_unlock();
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	/* Spin politely until kthread_stop() collects us. */
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
  267. /*
  268. * Create an RCU-torture statistics message in the specified buffer.
  269. */
  270. static int
  271. rcu_torture_printk(char *page)
  272. {
  273. int cnt = 0;
  274. int cpu;
  275. int i;
  276. long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  277. long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
  278. for_each_possible_cpu(cpu) {
  279. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  280. pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
  281. batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
  282. }
  283. }
  284. for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
  285. if (pipesummary[i] != 0)
  286. break;
  287. }
  288. cnt += sprintf(&page[cnt], "rcutorture: ");
  289. cnt += sprintf(&page[cnt],
  290. "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
  291. "rtmbe: %d",
  292. rcu_torture_current,
  293. rcu_torture_current_version,
  294. list_empty(&rcu_torture_freelist),
  295. atomic_read(&n_rcu_torture_alloc),
  296. atomic_read(&n_rcu_torture_alloc_fail),
  297. atomic_read(&n_rcu_torture_free),
  298. atomic_read(&n_rcu_torture_mberror));
  299. if (atomic_read(&n_rcu_torture_mberror) != 0)
  300. cnt += sprintf(&page[cnt], " !!!");
  301. cnt += sprintf(&page[cnt], "\nrcutorture: ");
  302. if (i > 1) {
  303. cnt += sprintf(&page[cnt], "!!! ");
  304. atomic_inc(&n_rcu_torture_error);
  305. }
  306. cnt += sprintf(&page[cnt], "Reader Pipe: ");
  307. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  308. cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
  309. cnt += sprintf(&page[cnt], "\nrcutorture: ");
  310. cnt += sprintf(&page[cnt], "Reader Batch: ");
  311. for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
  312. cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
  313. cnt += sprintf(&page[cnt], "\nrcutorture: ");
  314. cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
  315. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  316. cnt += sprintf(&page[cnt], " %d",
  317. atomic_read(&rcu_torture_wcount[i]));
  318. }
  319. cnt += sprintf(&page[cnt], "\n");
  320. return cnt;
  321. }
  322. /*
  323. * Print torture statistics. Caller must ensure that there is only
  324. * one call to this function at a given time!!! This is normally
  325. * accomplished by relying on the module system to only have one copy
  326. * of the module loaded, and then by giving the rcu_torture_stats
  327. * kthread full control (or the init/cleanup functions when rcu_torture_stats
  328. * thread is not running).
  329. */
  330. static void
  331. rcu_torture_stats_print(void)
  332. {
  333. int cnt;
  334. cnt = rcu_torture_printk(printk_buf);
  335. printk(KERN_ALERT "%s", printk_buf);
  336. }
  337. /*
  338. * Periodically prints torture statistics, if periodic statistics printing
  339. * was specified via the stat_interval module parameter.
  340. *
  341. * No need to worry about fullstop here, since this one doesn't reference
  342. * volatile state or register callbacks.
  343. */
  344. static int
  345. rcu_torture_stats(void *arg)
  346. {
  347. VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
  348. do {
  349. schedule_timeout_interruptible(stat_interval * HZ);
  350. rcu_torture_stats_print();
  351. } while (!kthread_should_stop());
  352. VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
  353. return 0;
  354. }
  355. static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
  356. /* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
  357. * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
  358. */
  359. void rcu_torture_shuffle_tasks(void)
  360. {
  361. cpumask_t tmp_mask = CPU_MASK_ALL;
  362. int i;
  363. lock_cpu_hotplug();
  364. /* No point in shuffling if there is only one online CPU (ex: UP) */
  365. if (num_online_cpus() == 1) {
  366. unlock_cpu_hotplug();
  367. return;
  368. }
  369. if (rcu_idle_cpu != -1)
  370. cpu_clear(rcu_idle_cpu, tmp_mask);
  371. set_cpus_allowed(current, tmp_mask);
  372. if (reader_tasks != NULL) {
  373. for (i = 0; i < nrealreaders; i++)
  374. if (reader_tasks[i])
  375. set_cpus_allowed(reader_tasks[i], tmp_mask);
  376. }
  377. if (writer_task)
  378. set_cpus_allowed(writer_task, tmp_mask);
  379. if (stats_task)
  380. set_cpus_allowed(stats_task, tmp_mask);
  381. if (rcu_idle_cpu == -1)
  382. rcu_idle_cpu = num_online_cpus() - 1;
  383. else
  384. rcu_idle_cpu--;
  385. unlock_cpu_hotplug();
  386. }
  387. /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
  388. * system to become idle at a time and cut off its timer ticks. This is meant
  389. * to test the support for such tickless idle CPU in RCU.
  390. */
  391. static int
  392. rcu_torture_shuffle(void *arg)
  393. {
  394. VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
  395. do {
  396. schedule_timeout_interruptible(shuffle_interval * HZ);
  397. rcu_torture_shuffle_tasks();
  398. } while (!kthread_should_stop());
  399. VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
  400. return 0;
  401. }
/*
 * Log the effective module parameters, prefixed with @tag (e.g.
 * "Start of test", "End of test: SUCCESS").
 * NOTE(review): "shuffle_interval = %d" is spaced unlike the other
 * fields; left as-is since scripts may parse this exact output.
 */
static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT TORTURE_FLAG "--- %s: nreaders=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval = %d\n",
		tag, nrealreaders, stat_interval, verbose, test_no_idle_hz,
		shuffle_interval);
}
/*
 * Tear down the test: stop all kthreads, drain RCU callbacks, and print
 * the final verdict.  Also serves as the unwind path for a failed
 * rcu_torture_init(), so every pointer is checked before use.
 * Ordering matters: fullstop is set first so rcu_torture_cb() stops
 * reposting, and rcu_barrier() runs only after the writer is stopped.
 */
static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;	/* tell callbacks and kthread loops to wind down */
	if (shuffler_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (stats_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	rcu_barrier();

	rcu_torture_stats_print();	/* -After- the stats thread is stopped! */
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}
  452. static int
  453. rcu_torture_init(void)
  454. {
  455. int i;
  456. int cpu;
  457. int firsterr = 0;
  458. /* Process args and tell the world that the torturer is on the job. */
  459. if (nreaders >= 0)
  460. nrealreaders = nreaders;
  461. else
  462. nrealreaders = 2 * num_online_cpus();
  463. rcu_torture_print_module_parms("Start of test");
  464. fullstop = 0;
  465. /* Set up the freelist. */
  466. INIT_LIST_HEAD(&rcu_torture_freelist);
  467. for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
  468. rcu_tortures[i].rtort_mbtest = 0;
  469. list_add_tail(&rcu_tortures[i].rtort_free,
  470. &rcu_torture_freelist);
  471. }
  472. /* Initialize the statistics so that each run gets its own numbers. */
  473. rcu_torture_current = NULL;
  474. rcu_torture_current_version = 0;
  475. atomic_set(&n_rcu_torture_alloc, 0);
  476. atomic_set(&n_rcu_torture_alloc_fail, 0);
  477. atomic_set(&n_rcu_torture_free, 0);
  478. atomic_set(&n_rcu_torture_mberror, 0);
  479. atomic_set(&n_rcu_torture_error, 0);
  480. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
  481. atomic_set(&rcu_torture_wcount[i], 0);
  482. for_each_possible_cpu(cpu) {
  483. for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
  484. per_cpu(rcu_torture_count, cpu)[i] = 0;
  485. per_cpu(rcu_torture_batch, cpu)[i] = 0;
  486. }
  487. }
  488. /* Start up the kthreads. */
  489. VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
  490. writer_task = kthread_run(rcu_torture_writer, NULL,
  491. "rcu_torture_writer");
  492. if (IS_ERR(writer_task)) {
  493. firsterr = PTR_ERR(writer_task);
  494. VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
  495. writer_task = NULL;
  496. goto unwind;
  497. }
  498. reader_tasks = kmalloc(nrealreaders * sizeof(reader_tasks[0]),
  499. GFP_KERNEL);
  500. if (reader_tasks == NULL) {
  501. VERBOSE_PRINTK_ERRSTRING("out of memory");
  502. firsterr = -ENOMEM;
  503. goto unwind;
  504. }
  505. for (i = 0; i < nrealreaders; i++) {
  506. VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
  507. reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
  508. "rcu_torture_reader");
  509. if (IS_ERR(reader_tasks[i])) {
  510. firsterr = PTR_ERR(reader_tasks[i]);
  511. VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
  512. reader_tasks[i] = NULL;
  513. goto unwind;
  514. }
  515. }
  516. if (stat_interval > 0) {
  517. VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
  518. stats_task = kthread_run(rcu_torture_stats, NULL,
  519. "rcu_torture_stats");
  520. if (IS_ERR(stats_task)) {
  521. firsterr = PTR_ERR(stats_task);
  522. VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
  523. stats_task = NULL;
  524. goto unwind;
  525. }
  526. }
  527. if (test_no_idle_hz) {
  528. rcu_idle_cpu = num_online_cpus() - 1;
  529. /* Create the shuffler thread */
  530. shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
  531. "rcu_torture_shuffle");
  532. if (IS_ERR(shuffler_task)) {
  533. firsterr = PTR_ERR(shuffler_task);
  534. VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
  535. shuffler_task = NULL;
  536. goto unwind;
  537. }
  538. }
  539. return 0;
  540. unwind:
  541. rcu_torture_cleanup();
  542. return firsterr;
  543. }
/* Entry/exit hooks: the torture test runs for the lifetime of the module. */
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);