trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
		return 1;
	}
	return 0;
}
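
/*
 * The trace buffer is a list of pages, each holding up to
 * ENTRIES_PER_PAGE fixed-size entries.  Walk every entry the writer
 * claims to have produced (data->trace_idx), hopping to the next page
 * via page->lru as each page is exhausted, and check that every entry
 * carries a known type.
 */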
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {
		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
			       entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}
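
/*
 * Each self test below follows the same pattern: start the tracer,
 * generate the kind of activity it is supposed to record, stop it,
 * and then use trace_test_buffer() to verify that the buffer is
 * sane and actually contains entries.
 */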

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)

static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}
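
/*
 * With dynamic ftrace in this era, the ftraced daemon records mcount
 * call sites and patches them; ftrace_force_update() below makes sure
 * the function above has been recorded before we install a name
 * filter on it.
 */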
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}

#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */
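
/*
 * The latency tracers exercised below (irqsoff, preemptoff, and
 * friends) snapshot their worst-case trace into the global max_tr,
 * which is why each test checks both the live buffer and the
 * max-latency copy.
 */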

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
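/*
 * The wakeup tracer measures the latency from the wakeup of a
 * high-priority task to the moment that task actually gets to run.
 * The test creates an RT kthread, waits for it to boost its own
 * priority and go to sleep, then wakes it so the tracer has a real
 * RT wakeup to record.
 */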
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread; it doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* let the caller know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */