trace_selftest.c
/* Include in trace.c */

#include <linux/kthread.h>
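
/*
 * Entry types this selftest knows how to validate; the tracers
 * exercised below emit only function (TRACE_FN) and context-switch
 * (TRACE_CTX) entries, so anything else marks the buffer as corrupt.
 */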
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
                return 1;
        }
        return 0;
}
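
/*
 * Walk one CPU's list of trace pages: every entry below trace_idx
 * must have a valid type, and the page list must end exactly where
 * tr->entries says it should.
 */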
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
        struct page *page;
        struct trace_entry *entries;
        int idx = 0;
        int i;

        page = list_entry(data->trace_pages.next, struct page, lru);
        entries = page_address(page);

        if (data->trace != entries)
                goto failed;

        /*
         * The starting trace buffer always has valid elements,
         * if any element exists.
         */
        entries = data->trace;

        for (i = 0; i < tr->entries; i++) {

                if (i < data->trace_idx &&
                    !trace_valid_entry(&entries[idx])) {
                        printk(KERN_CONT ".. invalid entry %d ", entries[idx].type);
                        goto failed;
                }

                idx++;
                if (idx >= ENTRIES_PER_PAGE) {
                        page = virt_to_page(entries);
                        if (page->lru.next == &data->trace_pages) {
                                /* end of the page list; this must be the last entry */
                                if (i != tr->entries - 1) {
                                        printk(KERN_CONT ".. entries buffer mismatch");
                                        goto failed;
                                }
                        } else {
                                /* advance to the next page of entries */
                                page = list_entry(page->lru.next, struct page, lru);
                                entries = page_address(page);
                        }
                        idx = 0;
                }
        }

        page = virt_to_page(entries);
        if (page->lru.next != &data->trace_pages) {
                printk(KERN_CONT ".. too many entries");
                goto failed;
        }

        return 0;

failed:
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long cnt = 0;
        int cpu;
        int ret = 0;

        for_each_possible_cpu(cpu) {
                if (!tr->data[cpu]->trace)
                        continue;

                cnt += tr->data[cpu]->trace_idx;
                printk("%d: count = %lu\n", cpu, cnt);

                ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
                if (ret)
                        break;
        }

        if (count)
                *count = cnt;

        return ret;
}

#ifdef CONFIG_FTRACE
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* make sure functions have been recorded */
        ret = ftrace_force_update();
        if (ret) {
                printk(KERN_CONT ".. ftraced failed .. ");
                return ret;
        }

        /* start the tracing */
        tr->ctrl = 1;
        trace->init(tr);

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
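/*
 * Test the irqsoff tracer: disable interrupts for a short window
 * and verify that both the live trace buffer and the max-latency
 * snapshot contain sane, non-empty traces.
 */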
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        tr->ctrl = 1;
        trace->init(tr);

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
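/*
 * Test the preemptoff tracer: disable preemption for a short window
 * and verify both the live trace buffer and the max-latency snapshot.
 */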
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        tr->ctrl = 1;
        trace->init(tr);

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
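/*
 * Test the preemptirqsoff tracer: disable preemption and interrupts
 * together, re-enable them in the reverse order, and verify both
 * trace buffers; then repeat the whole sequence a second time.
 */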
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        tr->ctrl = 1;
        trace->init(tr);

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tr->ctrl = 1;
        trace->ctrl_update(tr);
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
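/*
 * Kernel thread used by the wakeup test below: boost itself to an
 * RT priority, signal the tester via the completion, then sleep
 * until it is woken and finally told to stop.
 */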
static int trace_wakeup_test_thread(void *data)
{
        struct completion *x = data;

        /* Make this an RT thread; the prio doesn't need to be too high */
        rt_mutex_setprio(current, MAX_RT_PRIO - 5);

        /* Let the tester know we now have the new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}
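
/*
 * Test the wakeup-latency tracer: wake a sleeping RT kthread and
 * verify that the resulting wakeup was captured in the max-latency
 * snapshot.
 */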
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        tr->ctrl = 1;
        trace->init(tr);

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes this is slightly racy. It is possible that, for some
         * strange reason, the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that is already awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case is that we disable tracing. Honestly, if this
         * race does happen, something is horribly wrong with the
         * system.
         */
        wake_up_process(p);

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
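/*
 * Test the context-switch tracer: let the system schedule normally
 * for 1/10 of a second and verify that switch events were recorded.
 */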
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        tr->ctrl = 1;
        trace->init(tr);

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tr->ctrl = 0;
        trace->ctrl_update(tr);

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
#endif /* CONFIG_DYNAMIC_FTRACE */
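
/*
 * Not part of this file, but for context: a sketch of how a tracer
 * might wire one of these selftests into its struct tracer, assuming
 * the ->selftest callback and a CONFIG_FTRACE_STARTUP_TEST-style
 * guard in this version of the tracing core:
 *
 *        static struct tracer irqsoff_tracer __read_mostly = {
 *                .name      = "irqsoff",
 *                .init      = irqsoff_tracer_init,
 *                .reset     = irqsoff_tracer_reset,
 *        #ifdef CONFIG_FTRACE_STARTUP_TEST
 *                .selftest  = trace_selftest_startup_irqsoff,
 *        #endif
 *        };
 *
 * register_tracer() is then expected to run ->selftest() and refuse
 * to register the tracer if the test fails.
 */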