/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}
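
/*
 * Consume every event queued on @cpu's buffer and check that each
 * one is a known entry type.  Returns 0 if the buffer is sane; on
 * the first bad entry (or if we loop more often than the buffer can
 * hold) tracing is disabled and -1 is returned.
 */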
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries.
		 * If we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* the function is passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}

#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
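
/*
 * DYN_FTRACE_TEST_NAME is expected to name a real, mcount-instrumented
 * function compiled in a separate object file, so that the compiler
 * cannot inline or elide the call above.  A minimal sketch of such a
 * companion definition (where exactly it lives is an implementation
 * detail of the tree) would be:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;	// compiled with -pg, so it calls mcount
 *	}
 */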

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
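/*
 * Disable interrupts for ~100us with the tracer active, then verify
 * that both the live buffer and the max-latency snapshot (max_tr)
 * contain sane, non-empty traces.
 */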
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
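/*
 * Run a combined preempt-off + irqs-off window twice: first to check
 * that the critical section shows up in both the live buffer and the
 * max-latency snapshot, then again after restarting tracing to make
 * sure a second pass is recorded as well.
 */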
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
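/*
 * The wakeup test: spawn an RT kthread, wait for it to raise its own
 * priority and go to sleep, then wake it while the tracer is running.
 * The wakeup latency of the higher-priority RT task is what should
 * land in the max-latency buffer.
 */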
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the waiter know we are at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
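
/*
 * The remaining selftests all follow the same pattern: init the
 * tracer, let it run for 100ms, stop it, and verify that the trace
 * buffer is sane and non-empty.
 */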

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */