trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
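
/*
 * trace_valid_entry - check that a buffer entry is one of the types
 * these selftests can produce (function or context-switch records).
 */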
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}
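
/*
 * Walk one CPU's trace buffer page by page and verify that every
 * recorded entry has a valid type.  On any inconsistency, disable
 * tracing so the corrupted buffer can be inspected.
 */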
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {
		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)

/* Dummy function: it exists only so that calling it hits mcount. */
static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
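
/*
 * The irqsoff tracer records the longest stretch of time spent with
 * interrupts disabled.  Disable IRQs for ~100us, then verify that both
 * the live buffer and the max-latency buffer contain entries.
 */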
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
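
/*
 * Same idea as the irqsoff test, but for the preemptoff tracer:
 * disable preemption for ~100us and check that the latency was
 * recorded in both buffers.
 */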
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
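
/*
 * Combined test: the preemptirqsoff tracer must record a latency
 * regardless of whether preemption or interrupts are re-enabled
 * first, so run the sequence once in each order.
 */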
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
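
/*
 * Helper thread for the wakeup test: boost itself to an RT priority,
 * signal the tester, then sleep until it is woken and finally told
 * to exit.
 */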
  339. static int trace_wakeup_test_thread(void *data)
  340. {
  341. struct completion *x = data;
  342. /* Make this a RT thread, doesn't need to be too high */
  343. rt_mutex_setprio(current, MAX_RT_PRIO - 5);
  344. /* Make it know we have a new prio */
  345. complete(x);
  346. /* now go to sleep and let the test wake us up */
  347. set_current_state(TASK_INTERRUPTIBLE);
  348. schedule();
  349. /* we are awake, now wait to disappear */
  350. while (!kthread_should_stop()) {
  351. /*
  352. * This is an RT task, do short sleeps to let
  353. * others run.
  354. */
  355. msleep(100);
  356. }
  357. return 0;
  358. }
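
/*
 * Verify that the wakeup tracer measures the wakeup latency of the
 * highest-priority runnable task: wake a sleeping RT thread and check
 * that the latency showed up in the max-latency buffer.
 */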
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
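
/*
 * Sanity check the context-switch tracer: trace for 1/10 of a second
 * and make sure at least one sched-switch event was recorded.
 */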
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
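
/*
 * For reference, a minimal sketch of how a tracer of this era might
 * wire one of these selftests up.  This is an illustration, not code
 * from this file: the .selftest member, register_tracer(), and
 * CONFIG_FTRACE_STARTUP_TEST are assumptions inferred from this
 * file's purpose, and the function_trace_* callbacks are hypothetical
 * names (only .init, .reset, and .ctrl_update are confirmed by the
 * calls above):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "ftrace",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 *	// in __init code: register_tracer(&function_trace);
 */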