ring_buffer_benchmark.c

/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>

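/*
 * Local view of a ring buffer data page: a timestamp, a commit
 * counter, and the event data itself.  read_page() casts the raw
 * page returned by ring_buffer_read_page() to this layout so it
 * can walk the events by hand.
 */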
struct rb_page {
        u64             ts;
        local_t         commit;
        char            data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME        10
#define SLEEP_TIME      10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int read_events;

static int kill_test;

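/*
 * KILL_TEST() latches the kill_test flag and emits a single WARN_ON()
 * the first time a consistency check fails; both threads poll the
 * flag and wind down once it is set.
 */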
#define KILL_TEST()                             \
        do {                                    \
                if (!kill_test) {               \
                        kill_test = 1;          \
                        WARN_ON(1);             \
                }                               \
        } while (0)

enum event_status {
        EVENT_FOUND,
        EVENT_DROPPED,
};

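/*
 * Consume a single event from @cpu's buffer.  Every event written by
 * the producer holds the writing CPU's id, so any other payload marks
 * the test as failed.
 */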
static enum event_status read_event(int cpu)
{
        struct ring_buffer_event *event;
        int *entry;
        u64 ts;

        event = ring_buffer_consume(buffer, cpu, &ts);
        if (!event)
                return EVENT_DROPPED;

        entry = ring_buffer_event_data(event);
        if (*entry != cpu) {
                KILL_TEST();
                return EVENT_DROPPED;
        }

        read++;
        return EVENT_FOUND;
}

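/*
 * Read a whole page from @cpu's buffer and walk its events manually,
 * advancing by each event's size.  Padding and time-extend records are
 * skipped; data events are validated against the writing CPU id.
 */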
static enum event_status read_page(int cpu)
{
        struct ring_buffer_event *event;
        struct rb_page *rpage;
        unsigned long commit;
        void *bpage;
        int *entry;
        int ret;
        int inc;
        int i;

        bpage = ring_buffer_alloc_read_page(buffer);
        if (!bpage)
                return EVENT_DROPPED;

        ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
        if (ret >= 0) {
                rpage = bpage;
                commit = local_read(&rpage->commit);
                for (i = 0; i < commit && !kill_test; i += inc) {

                        if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
                                KILL_TEST();
                                break;
                        }

                        inc = -1;
                        event = (void *)&rpage->data[i];
                        switch (event->type_len) {
                        case RINGBUF_TYPE_PADDING:
                                /* failed writes may be discarded events */
                                if (!event->time_delta)
                                        KILL_TEST();
                                inc = event->array[0] + 4;
                                break;
                        case RINGBUF_TYPE_TIME_EXTEND:
                                inc = 8;
                                break;
                        case 0:
                                entry = ring_buffer_event_data(event);
                                if (*entry != cpu) {
                                        KILL_TEST();
                                        break;
                                }
                                read++;
                                if (!event->array[0]) {
                                        KILL_TEST();
                                        break;
                                }
                                inc = event->array[0] + 4;
                                break;
                        default:
                                entry = ring_buffer_event_data(event);
                                if (*entry != cpu) {
                                        KILL_TEST();
                                        break;
                                }
                                read++;
                                inc = ((event->type_len + 1) * 4);
                        }
                        if (kill_test)
                                break;

                        if (inc <= 0) {
                                KILL_TEST();
                                break;
                        }
                }
        }
        ring_buffer_free_read_page(buffer, bpage);

        if (ret < 0)
                return EVENT_DROPPED;
        return EVENT_FOUND;
}

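/*
 * Drain the buffer on all online CPUs until the producer sets
 * reader_finish.  Each run of the benchmark alternates between
 * consuming one event at a time and consuming full pages.
 */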
static void ring_buffer_consumer(void)
{
        /* toggle between reading pages and events */
        read_events ^= 1;

        read = 0;
        while (!reader_finish && !kill_test) {
                int found;

                do {
                        int cpu;

                        found = 0;
                        for_each_online_cpu(cpu) {
                                enum event_status stat;

                                if (read_events)
                                        stat = read_event(cpu);
                                else
                                        stat = read_page(cpu);

                                if (kill_test)
                                        break;
                                if (stat == EVENT_FOUND)
                                        found = 1;
                        }
                } while (found && !kill_test);

                set_current_state(TASK_INTERRUPTIBLE);
                if (reader_finish)
                        break;

                schedule();
                __set_current_state(TASK_RUNNING);
        }
        reader_finish = 0;
        complete(&read_done);
}

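/*
 * Write to the buffer as fast as possible for RUN_TIME seconds,
 * waking the consumer every wakeup_interval iterations, then report
 * throughput statistics through trace_printk().
 */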
static void ring_buffer_producer(void)
{
        struct timeval start_tv;
        struct timeval end_tv;
        unsigned long long time;
        unsigned long long entries;
        unsigned long long overruns;
        unsigned long missed = 0;
        unsigned long hit = 0;
        unsigned long avg;
        int cnt = 0;

        /*
         * Hammer the buffer for 10 secs (this may
         * make the system stall)
         */
        trace_printk("Starting ring buffer hammer\n");
        do_gettimeofday(&start_tv);
        do {
                struct ring_buffer_event *event;
                int *entry;
                int i;

                for (i = 0; i < write_iteration; i++) {
                        event = ring_buffer_lock_reserve(buffer, 10);
                        if (!event) {
                                missed++;
                        } else {
                                hit++;
                                entry = ring_buffer_event_data(event);
                                *entry = smp_processor_id();
                                ring_buffer_unlock_commit(buffer, event);
                        }
                }
                do_gettimeofday(&end_tv);

                cnt++;
                if (consumer && !(cnt % wakeup_interval))
                        wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
                /*
                 * If we are a non-preempt kernel, the 10 second run will
                 * stop everything while it runs. Instead, we will call
                 * cond_resched and also add any time that was lost by a
                 * reschedule.
                 *
                 * Do a cond_resched at the same frequency we would wake up
                 * the reader.
                 */
                if (cnt % wakeup_interval)
                        cond_resched();
#endif

        } while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
        trace_printk("End ring buffer hammer\n");

        if (consumer) {
                /* Init both completions here to avoid races */
                init_completion(&read_start);
                init_completion(&read_done);
                /* the completions must be visible before the finish var */
                smp_wmb();
                reader_finish = 1;
                /* finish var visible before waking up the consumer */
                smp_wmb();
                wake_up_process(consumer);
                wait_for_completion(&read_done);
        }

        time = end_tv.tv_sec - start_tv.tv_sec;
        time *= USEC_PER_SEC;
        time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

        entries = ring_buffer_entries(buffer);
        overruns = ring_buffer_overruns(buffer);

        if (kill_test)
                trace_printk("ERROR!\n");
        trace_printk("Time: %lld (usecs)\n", time);
        trace_printk("Overruns: %lld\n", overruns);
        if (disable_reader)
                trace_printk("Read: (reader disabled)\n");
        else
                trace_printk("Read: %ld (by %s)\n", read,
                             read_events ? "events" : "pages");
        trace_printk("Entries: %lld\n", entries);
        trace_printk("Total: %lld\n", entries + overruns + read);
        trace_printk("Missed: %ld\n", missed);
        trace_printk("Hit: %ld\n", hit);

        /* Convert time from usecs to millisecs */
        do_div(time, USEC_PER_MSEC);
        if (time)
                hit /= (long)time;
        else
                trace_printk("TIME IS ZERO??\n");

        trace_printk("Entries per millisec: %ld\n", hit);

        if (hit) {
                /* Calculate the average time in nanosecs */
                avg = NSEC_PER_MSEC / hit;
                trace_printk("%ld ns per entry\n", avg);
        }

        if (missed) {
                if (time)
                        missed /= (long)time;

                trace_printk("Total iterations per millisec: %ld\n",
                             hit + missed);

                /* it is possible that hit + missed will overflow and be zero */
                if (!(hit + missed)) {
                        trace_printk("hit + missed overflowed and totalled zero!\n");
                        hit--; /* make it non zero */
                }

                /* Calculate the average time in nanosecs */
                avg = NSEC_PER_MSEC / (hit + missed);
                trace_printk("%ld ns per entry\n", avg);
        }
}

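/*
 * Once a test failure has been flagged, the kthreads park here and
 * simply sleep until the module is unloaded and kthread_stop() is
 * called on them.
 */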
static void wait_to_die(void)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
}

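/*
 * Consumer kthread: signal read_start so the producer knows the reader
 * is up, drain the buffer, then sleep until the producer wakes it for
 * the next run.
 */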
static int ring_buffer_consumer_thread(void *arg)
{
        while (!kthread_should_stop() && !kill_test) {
                complete(&read_start);

                ring_buffer_consumer();

                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop() || kill_test)
                        break;

                schedule();
                __set_current_state(TASK_RUNNING);
        }
        __set_current_state(TASK_RUNNING);

        if (kill_test)
                wait_to_die();

        return 0;
}

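/*
 * Producer kthread: reset the buffer, wait for the consumer to be
 * ready, run one benchmark pass, then sleep for SLEEP_TIME seconds
 * before starting the next pass.
 */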
static int ring_buffer_producer_thread(void *arg)
{
        init_completion(&read_start);

        while (!kthread_should_stop() && !kill_test) {
                ring_buffer_reset(buffer);

                if (consumer) {
                        smp_wmb();
                        wake_up_process(consumer);
                        wait_for_completion(&read_start);
                }

                ring_buffer_producer();

                trace_printk("Sleeping for 10 secs\n");
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ * SLEEP_TIME);
                __set_current_state(TASK_RUNNING);
        }

        if (kill_test)
                wait_to_die();

        return 0;
}

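/*
 * Module init: allocate a ~1 MB buffer in overwrite mode, start the
 * consumer (unless disable_reader is set) and the producer, and run
 * both as nice-19 background tasks.
 */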
static int __init ring_buffer_benchmark_init(void)
{
        int ret;

        /* make a one meg buffer in overwrite mode */
        buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
        if (!buffer)
                return -ENOMEM;

        if (!disable_reader) {
                consumer = kthread_create(ring_buffer_consumer_thread,
                                          NULL, "rb_consumer");
                ret = PTR_ERR(consumer);
                if (IS_ERR(consumer))
                        goto out_fail;
        }

        producer = kthread_run(ring_buffer_producer_thread,
                               NULL, "rb_producer");
        ret = PTR_ERR(producer);
        if (IS_ERR(producer))
                goto out_kill;

        /*
         * Run them as low-prio background tasks by default
         * (there is no consumer task when disable_reader is set):
         */
        if (consumer)
                set_user_nice(consumer, 19);
        set_user_nice(producer, 19);

        return 0;

 out_kill:
        if (consumer)
                kthread_stop(consumer);

 out_fail:
        ring_buffer_free(buffer);
        return ret;
}

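/*
 * Module exit: stop both kthreads and release the ring buffer.
 */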
static void __exit ring_buffer_benchmark_exit(void)
{
        kthread_stop(producer);
        if (consumer)
                kthread_stop(consumer);
        ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");