trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}
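/*
 * Consume every event queued on one CPU of the ring buffer and check
 * that each entry has a known type. On corruption, tracing is disabled
 * and -1 is returned.
 */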
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries.
		 * If we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
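/*
 * Verify the irqsoff tracer: with tracing enabled, disable interrupts
 * around a short busy wait, then check that both the live buffer and
 * the max-latency buffer contain sane entries.
 */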
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
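/*
 * Verify the preemptoff tracer: disable preemption around a short busy
 * wait with tracing enabled, then check both the live buffer and the
 * max-latency buffer. If preemption is already disabled (e.g. BKL
 * held), the test is forced to pass.
 */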
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
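/*
 * Verify the combined preemptirqsoff tracer: disable both preemption
 * and interrupts around a short busy wait (the critical section is
 * exercised twice), and check that the live buffer and the max-latency
 * buffer both contain sane entries.
 */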
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
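/*
 * Helper kthread for the wakeup tracer test: it raises itself to an RT
 * priority, signals the test via the completion, then sleeps until the
 * test wakes it up and finally stops it.
 */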
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we are now running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
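/*
 * Verify the wakeup tracer: create an RT kthread, let it block, wake it
 * while tracing, and check that both the live buffer and the
 * max-latency buffer contain entries.
 */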
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
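/*
 * Verify the context switch tracer: enable it, sleep for a while so
 * context switches get recorded, then check that the buffer holds
 * sane entries.
 */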
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
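/*
 * Verify the sysprof tracer: enable it, sleep briefly, and check that
 * the trace buffer is still sane. An empty buffer is not treated as a
 * failure here.
 */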
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
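/*
 * Verify the branch tracer: enable it, sleep briefly, and check that
 * the trace buffer is still sane.
 */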
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */