ring_buffer_benchmark.c

/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
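
/*
 * Layout of a single ring buffer data page as seen when reading whole
 * pages with ring_buffer_read_page(): a timestamp, a commit counter,
 * and the raw event data.
 */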
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = 19;
static int consumer_nice = 19;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, uint, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, uint, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, uint, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, uint, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int kill_test;
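
/*
 * Flag the test as failed (once) and warn; the producer and consumer
 * loops check kill_test and bail out when it is set.
 */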
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};
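
/*
 * Consume a single event from @cpu and verify that its payload is the
 * CPU number the producer wrote.
 */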
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}
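
/*
 * Swap out a full page from @cpu's buffer and walk its events by hand,
 * validating each payload and that the read index always advances.
 */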
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		commit = local_read(&rpage->commit);
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}
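
/*
 * Main consumer loop: drain every online CPU, alternating between the
 * event and page read paths on successive runs, then sleep until the
 * producer wakes us or tells us to finish.
 */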
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}
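
/*
 * Hammer the ring buffer with writes for RUN_TIME seconds, periodically
 * waking the consumer, then report the timing and hit/miss statistics
 * via trace_printk().
 */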
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non-preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
  225. trace_printk("End ring buffer hammer\n");
  226. if (consumer) {
  227. /* Init both completions here to avoid races */
  228. init_completion(&read_start);
  229. init_completion(&read_done);
  230. /* the completions must be visible before the finish var */
  231. smp_wmb();
  232. reader_finish = 1;
  233. /* finish var visible before waking up the consumer */
  234. smp_wmb();
  235. wake_up_process(consumer);
  236. wait_for_completion(&read_done);
  237. }
  238. time = end_tv.tv_sec - start_tv.tv_sec;
  239. time *= USEC_PER_SEC;
  240. time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);
  241. entries = ring_buffer_entries(buffer);
  242. overruns = ring_buffer_overruns(buffer);
  243. if (kill_test)
  244. trace_printk("ERROR!\n");
  245. if (!disable_reader) {
  246. if (consumer_fifo < 0)
  247. trace_printk("Running Consumer at nice: %d\n",
  248. consumer_nice);
  249. else
  250. trace_printk("Running Consumer at SCHED_FIFO %d\n",
  251. consumer_fifo);
  252. }
  253. if (producer_fifo < 0)
  254. trace_printk("Running Producer at nice: %d\n",
  255. producer_nice);
  256. else
  257. trace_printk("Running Producer at SCHED_FIFO %d\n",
  258. producer_fifo);
  259. /* Let the user know that the test is running at low priority */
  260. if (producer_fifo < 0 && consumer_fifo < 0 &&
  261. producer_nice == 19 && consumer_nice == 19)
  262. trace_printk("WARNING!!! This test is running at lowest priority.\n");
  263. trace_printk("Time: %lld (usecs)\n", time);
  264. trace_printk("Overruns: %lld\n", overruns);
  265. if (disable_reader)
  266. trace_printk("Read: (reader disabled)\n");
  267. else
  268. trace_printk("Read: %ld (by %s)\n", read,
  269. read_events ? "events" : "pages");
  270. trace_printk("Entries: %lld\n", entries);
  271. trace_printk("Total: %lld\n", entries + overruns + read);
  272. trace_printk("Missed: %ld\n", missed);
  273. trace_printk("Hit: %ld\n", hit);
  274. /* Convert time from usecs to millisecs */
  275. do_div(time, USEC_PER_MSEC);
  276. if (time)
  277. hit /= (long)time;
  278. else
  279. trace_printk("TIME IS ZERO??\n");
  280. trace_printk("Entries per millisec: %ld\n", hit);
  281. if (hit) {
  282. /* Calculate the average time in nanosecs */
  283. avg = NSEC_PER_MSEC / hit;
  284. trace_printk("%ld ns per entry\n", avg);
  285. }
  286. if (missed) {
  287. if (time)
  288. missed /= (long)time;
  289. trace_printk("Total iterations per millisec: %ld\n",
  290. hit + missed);
  291. /* it is possible that hit + missed will overflow and be zero */
  292. if (!(hit + missed)) {
  293. trace_printk("hit + missed overflowed and totalled zero!\n");
  294. hit--; /* make it non zero */
  295. }
  296. /* Caculate the average time in nanosecs */
  297. avg = NSEC_PER_MSEC / (hit + missed);
  298. trace_printk("%ld ns per entry\n", avg);
  299. }
  300. }
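
/*
 * Park the thread until kthread_stop() is called; used once a test
 * failure has been flagged via KILL_TEST().
 */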
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}
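
/*
 * Consumer thread: signal the producer that reading has started, drain
 * the buffer, then sleep until the next test run wakes us.
 */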
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}
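
/*
 * Producer thread: reset the buffer, wait for the consumer to be ready,
 * run one benchmark pass, then sleep SLEEP_TIME seconds and repeat.
 */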
static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}
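
/*
 * Allocate the buffer, start the producer (and, unless disabled, the
 * consumer) kthreads, and apply the requested scheduling policy.
 */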
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}
static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");