trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}
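
/*
 * Walk every entry recorded on one CPU's list of trace pages and
 * verify that each has a known entry type. Returns 0 if the buffer
 * is sane; on corruption, tracing is disabled and -1 is returned.
 */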
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {
		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
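
/*
 * STR() stringifies DYN_FTRACE_TEST_NAME after macro expansion, so
 * the test function can be passed by name to ftrace_set_filter()
 * below.
 */
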
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/*
	 * The test function is passed in as a parameter to keep
	 * gcc from optimizing the call away.
	 */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}

#else
/* When dynamic ftrace is not configured, this test trivially passes. */
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}

#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
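
/*
 * Verify the irqsoff tracer: open a known interrupts-off window
 * with udelay(), then check that both the live buffer and the
 * max-latency snapshot (max_tr) recorded entries for it.
 */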
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
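
/*
 * Verify the preemptoff tracer the same way: open a known
 * preemption-off window with udelay() and check that both trace
 * buffers recorded entries for it.
 */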
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
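
/*
 * The combined test makes two passes: disable preemption, then
 * interrupts, spin in udelay(), and re-enable preemption while
 * interrupts are still off before finally re-enabling them. Both
 * the live buffer and max_tr are checked after each pass.
 */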
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test again, restarting the tracer with ctrl_update this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
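
/*
 * The wakeup tracer records the latency from when the highest-prio
 * runnable task is woken until it actually gets to run. The helper
 * below is the RT kthread whose wakeup the selftest traces.
 */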
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we now have an RT prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
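
/*
 * Spawn the RT thread above, trace a wakeup of it, and verify that
 * both the live buffer and the max-latency snapshot saw entries.
 */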
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
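
/*
 * Sanity-check the context switch tracer: run it for 1/10 of a
 * second and make sure at least one entry was logged.
 */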
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */