trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	check_pages(data);
	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
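
/*
 * The two-level macro pair above is the standard C stringification
 * idiom: STR() expands its argument first, then __STR() stringifies
 * the result, so STR(DYN_FTRACE_TEST_NAME) becomes the expanded
 * function name (e.g. "trace_selftest_dynamic_test_func") rather
 * than the literal string "DYN_FTRACE_TEST_NAME".
 */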

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
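
/*
 * A sketch of the test function itself (an assumption, matching the
 * convention in this tree): DYN_FTRACE_TEST_NAME is defined in a
 * separately compiled file such as trace_selftest_dynamic.c, so that
 * gcc cannot inline it and it reliably receives an mcount call:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;
 *	}
 */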

#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */
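
/*
 * Hedged usage sketch: the selftests in this file are not called
 * directly. Each tracer points at one of them from its struct tracer,
 * and register_tracer() runs the selftest before making the tracer
 * available. The field names below assume the struct tracer layout in
 * trace.h of this tree:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "ftrace",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */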

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the prio doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the parent know the new prio is in effect */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, in which
	 * case we would do a wakeup on a task that is already awake.
	 * But that is extremely unlikely, and the worst thing that
	 * can happen in such a case is that we disable tracing.
	 * Honestly, if this race does happen, something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */