trace_selftest.c

/* Include in trace.c */
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
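
/*
 * For reference, a minimal sketch of the test target that callers are
 * expected to pass in as @func above. This is an illustration, not part of
 * this file: the function typically lives in a separate translation unit
 * (in mainline, trace_selftest_dynamic.c) so gcc cannot inline it and it
 * keeps its mcount call site. The exact file name is an assumption here.
 */
#if 0	/* illustrative only */
int DYN_FTRACE_TEST_NAME(void)
{
	/* a plain, non-inlined call is all the selftest needs to record */
	return 0;
}
#endif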

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
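
/*
 * A minimal sketch of how one of these selftests gets hooked up, assuming
 * the mainline conventions of this era: a tracer points its .selftest
 * callback at the routine above (usually guarded by CONFIG_FTRACE_SELFTEST),
 * and register_tracer() runs it when CONFIG_FTRACE_STARTUP_TEST is enabled.
 * The init/reset callback names below are placeholders, not definitions
 * taken from this file.
 */
#if 0	/* illustrative only */
static struct tracer function_trace __read_mostly = {
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.selftest	= trace_selftest_startup_function,
};
#endif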

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the test know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */