/* trace_selftest.c -- include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}
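
/*
 * For reference, a sketch of where the entry types above come from. This
 * is an assumption about code outside this file: the authoritative
 * definition is enum trace_type in trace.h, expected to look roughly like:
 *
 *	enum trace_type {
 *		__TRACE_FIRST_TYPE = 0,
 *
 *		TRACE_FN,
 *		TRACE_CTX,
 *		TRACE_WAKE,
 *		TRACE_STACK,
 *		TRACE_SPECIAL,
 *
 *		__TRACE_LAST_TYPE
 *	};
 *
 * Any value outside that range means the buffer was corrupted.
 */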
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}
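
/*
 * How the selftests below get invoked is worth noting, though the details
 * are an assumption about code outside this file: each tracer points its
 * struct tracer ->selftest hook at one of the trace_selftest_startup_*()
 * routines, guarded by CONFIG_FTRACE_STARTUP_TEST. A minimal sketch,
 * modeled on the function tracer in trace_functions.c:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "ftrace",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */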

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
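
/*
 * A sketch of the function the dynamic test filters on. This is an
 * assumption about code outside this file: DYN_FTRACE_TEST_NAME is
 * expected to resolve (via trace.h) to a real function compiled in a
 * separate object file, e.g. trace_selftest_dynamic.c:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;	(its only job is to call mcount on entry)
 *	}
 *
 * Living in a separate object keeps gcc from inlining it, so an mcount
 * call site survives for the filter to match.
 */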

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread, doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that can happen
	 * in such a case is that we disable tracing. Honestly, if
	 * this race does happen, something is horribly wrong with
	 * the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
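
/*
 * For context, a hedged sketch of the caller side. This is an assumption
 * about trace.c, not part of this file: register_tracer() is expected to
 * run a tracer's selftest at registration time, roughly along these lines,
 * rejecting the tracer if the test fails:
 *
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		if (type->selftest) {
 *			struct tracer *saved_tracer = current_trace;
 *			struct trace_array *tr = &global_trace;
 *
 *			pr_info("Testing tracer %s: ", type->name);
 *			current_trace = type;
 *			ret = type->selftest(type, tr);
 *			current_trace = saved_tracer;
 *			if (ret)
 *				goto out;	(tracer is not registered)
 *			printk(KERN_CONT "PASSED\n");
 *		}
 *	#endif
 */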