trace_selftest.c

/* Include in trace.c */
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
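
/*
 * Consume every event queued on one CPU of the trace array's ring buffer
 * and check that each entry carries a recognized trace type. If we consume
 * more entries than trace_buf_size allows, or hit an unknown entry type,
 * the buffer is treated as corrupted and tracing is disabled.
 */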
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if
		 * we loop more times than that, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it out */
	func();
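
	/*
	 * Note: func (DYN_FTRACE_TEST_NAME in the caller) is assumed to be
	 * a real, mcount-hooked function that the compiler cannot inline
	 * here; in the kernel it is typically defined in a separate object
	 * file (e.g. trace_selftest_dynamic.c) as little more than:
	 *
	 *	int DYN_FTRACE_TEST_NAME(void)
	 *	{
	 *		return 0;
	 *	}
	 */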

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
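/* Without dynamic ftrace there is nothing to patch; the stub just reports success. */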
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
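/*
 * Run the irqsoff tracer against a known latency: disable interrupts for
 * roughly 100us, then verify that both the live buffer and the max-latency
 * snapshot (max_tr) picked up entries.
 */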
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
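/*
 * Exercise the combined preempt/irqs-off tracer over two passes: disable
 * both preemption and interrupts for roughly 100us, re-enable them, and
 * check the live and max buffers; then reset the max latency and repeat,
 * verifying that the second pass records entries as well.
 */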
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
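/*
 * The wakeup tracer records the latency between waking the highest-priority
 * RT task and that task actually getting the CPU. This selftest spawns a
 * SCHED_FIFO kthread, lets it go to sleep, wakes it up, and then checks that
 * both the live buffer and the max-latency snapshot recorded the event.
 */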
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule() for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */