trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
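
/*
 * Check that an entry pulled out of the ring buffer has a type that
 * the selftests below can produce; anything else means the buffer
 * has been corrupted.
 */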
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
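
/*
 * Drain one CPU's ring buffer, checking every consumed event for a
 * valid entry type. Tracing is disabled if corruption is found.
 */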
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer selftest, from which
 * this test has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
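/*
 * Run with interrupts disabled for a short while and verify that the
 * irqsoff tracer recorded the latency in the max trace buffer.
 */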
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
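/*
 * Same idea as the irqsoff test, but with preemption disabled instead
 * of interrupts.
 */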
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
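/*
 * Combined test: disable both preemption and interrupts, twice, and
 * check after each pass that the preemptirqsoff tracer recorded a
 * latency in the max trace buffer.
 */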
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test again with the max latency reset */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
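/*
 * Kernel thread used by the wakeup tracer selftest below: it raises
 * itself to an RT priority, signals the test, and then sleeps until
 * it is woken and finally stopped.
 */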
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we now have an RT prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
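
/*
 * Create an RT thread, wake it while tracing, and verify that the
 * wakeup tracer recorded the wakeup latency in the max trace buffer.
 */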
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
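/*
 * Let the scheduler run for a bit and check that context switch events
 * made it into the trace buffer.
 */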
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
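/*
 * Same pattern for the sysprof tracer: trace for 1/10 of a second and
 * make sure the buffer is not empty.
 */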
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
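/*
 * Same pattern for the branch tracer: trace for 1/10 of a second and
 * make sure the buffer is not empty.
 */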
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */