ring_buffer_benchmark.c

/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
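
/*
 * Layout of a ring buffer data page as this test reads it back:
 * the page's timestamp, the commit counter (bytes of event data
 * on the page), and the event data filling the rest of the page.
 */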
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, int, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int read_events;

static int kill_test;
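
/*
 * Flag a failure exactly once and dump a stack trace; both the
 * producer and consumer poll kill_test so the run winds down
 * gracefully instead of looping on a corrupt buffer.
 */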
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};
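
/*
 * Consume a single event from @cpu and verify that its payload is
 * the CPU number the producer stamped into it.
 */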
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}
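
/*
 * Read a full page from @cpu with ring_buffer_read_page() and walk
 * its events by hand, advancing by each event's length and sanity
 * checking every payload along the way.
 */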
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		commit = local_read(&rpage->commit);
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* We don't expect any padding */
				KILL_TEST();
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				/*
				 * array[0] holds only the data length;
				 * add the 4-byte event header to step
				 * over the whole event.
				 */
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;
			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}
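
/*
 * Drain the buffer on every online CPU until the producer tells us
 * to finish, alternating between the per-event and per-page read
 * paths on each invocation.
 */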
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}
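
/*
 * Write events as fast as possible for RUN_TIME seconds, waking the
 * consumer every wakeup_interval events, then report throughput
 * statistics.
 */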
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	pr_info("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;

		event = ring_buffer_lock_reserve(buffer, 10);
		if (!event) {
			missed++;
		} else {
			hit++;
			entry = ring_buffer_event_data(event);
			*entry = smp_processor_id();
			ring_buffer_unlock_commit(buffer, event);
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	pr_info("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= USEC_PER_SEC;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test)
		pr_info("ERROR!\n");
	pr_info("Time:     %lld (usecs)\n", time);
	pr_info("Overruns: %lld\n", overruns);
	if (disable_reader)
		pr_info("Read:     (reader disabled)\n");
	else
		pr_info("Read:     %ld  (by %s)\n", read,
			read_events ? "events" : "pages");
	pr_info("Entries:  %lld\n", entries);
	pr_info("Total:    %lld\n", entries + overruns + read);
	pr_info("Missed:   %ld\n", missed);
	pr_info("Hit:      %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		pr_info("TIME IS ZERO??\n");

	pr_info("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		pr_info("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		pr_info("Total iterations per millisec: %ld\n", hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			pr_info("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		pr_info("%ld ns per entry\n", avg);
	}
}
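
/* Park a failed thread until module unload asks it to stop. */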
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}
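
/*
 * Consumer kthread: signal read_start so the producer knows we are
 * ready, drain the buffer, then sleep until woken for the next run.
 */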
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}
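
/*
 * Producer kthread: reset the buffer, hand-shake with the consumer
 * if one exists, run one benchmark pass, then sleep for SLEEP_TIME
 * seconds before the next pass.
 */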
static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		pr_info("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}
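
/* Allocate the buffer and launch the producer/consumer kthreads. */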
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);
	if (IS_ERR(producer))
		goto out_kill;

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}
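
/* Stop both kthreads and release the buffer on module unload. */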
static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");
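
/*
 * Example usage (a sketch; the module name assumes an ordinary
 * modular build of this file):
 *
 *   modprobe ring_buffer_benchmark             # run producer + consumer
 *   dmesg | grep -A4 'End ring buffer hammer'  # read the statistics
 *   modprobe -r ring_buffer_benchmark
 *
 * Pass disable_reader=1 to benchmark the producer alone.
 */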