trace_selftest.c

/* Include in trace.c */
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
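
/*
 * A sketch of the per-cpu buffer layout the walk above assumes
 * (matching the page-based ring buffer in this tree's trace.c):
 *
 *	data->trace_pages (list head)
 *	  -> page 0: entries[0 .. ENTRIES_PER_PAGE-1]
 *	  -> page 1: entries[0 .. ENTRIES_PER_PAGE-1]
 *	  -> ...
 *
 * Pages are chained through page->lru, so running an entry index off
 * the end of one page means hopping to page->lru.next; arriving back
 * at the list head means the whole buffer has been walked.
 */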
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}
#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)
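
/*
 * The two-level macro is the usual C preprocessor trick for
 * stringifying the *expansion* of a macro: STR(DYN_FTRACE_TEST_NAME)
 * becomes "trace_selftest_dynamic_test_func", whereas a single-level
 * #x would yield the literal string "DYN_FTRACE_TEST_NAME".
 */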

static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}
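
/*
 * Calling the otherwise empty function above executes its compiled-in
 * mcount call, which is what dynamic ftrace keys on in this tree: the
 * call site gets recorded, and ftrace_force_update() below has the
 * ftraced daemon fold that record into the set of patchable
 * functions, so ftrace_set_filter() can then match it by name.
 */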

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */
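
/*
 * For reference, a sketch of how a tracer would hook into these
 * selftests. This assumes struct tracer carries a ->selftest method
 * that the tracing core invokes at registration time when
 * CONFIG_FTRACE_STARTUP_TEST is set (names below follow the function
 * tracer; treat them as illustrative):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "ftrace",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */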

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
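	/*
	 * The latency tracers keep two buffers: tr holds the live
	 * trace, while max_tr holds a snapshot of the longest
	 * irqs-off window seen so far (saved whenever a new maximum
	 * is recorded). Both must stay sane, so check each in turn.
	 */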
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread, doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to be stopped */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
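
/*
 * The wakeup tracer measures the latency from the wakeup of the
 * highest-priority task to the moment it actually gets the CPU, so
 * the test thread above must run at an RT priority for its wakeup
 * to be the one the tracer latches onto.
 */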

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */