trace_selftest.c

/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

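/*
 * Only the entry types that the selftest tracers are expected to
 * write count as valid when scanning the ring buffer.
 */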
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

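/* Consume every event on one CPU's buffer and sanity check each entry */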
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
		unsigned long pip,
		struct ftrace_ops *op)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
		unsigned long pip,
		struct ftrace_ops *op)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
		unsigned long pip,
		struct ftrace_ops *op)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
		unsigned long pip,
		struct ftrace_ops *op)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
		unsigned long pip,
		struct ftrace_ops *op)
{
	trace_selftest_test_dyn_cnt++;
}

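/*
 * Static ops, one per counting probe above.  test_global is put on
 * the global ops list via FTRACE_OPS_FL_GLOBAL.
 */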
static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
	.func = trace_selftest_test_global_func,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
		trace_selftest_test_probe1_cnt,
		trace_selftest_test_probe2_cnt,
		trace_selftest_test_probe3_cnt,
		trace_selftest_test_global_cnt,
		trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

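/*
 * Register several ftrace_ops at once: probes 1-3 use per-ops filters,
 * test_global sits on the global ops list, and a dynamically allocated
 * ops is added part way through.  After each call to the test functions
 * the hit counters are checked against the expected values.
 */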
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

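	/* Call function 1: probes 1 and 3 and the global probe should fire */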
	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

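	/* Call function 2: only probes 2 and 3 are filtered on it */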
	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

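	/* Function 1 again: the unfiltered dynamic ops should now fire too */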
	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
		struct trace_array *tr,
		int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();
	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
			DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
		struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
			&trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible should a parallel max irqs off
	 * latency come in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible should a parallel max preempt off
	 * latency come in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible should a parallel max irqs/preempt
	 * off latency come in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

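/* RT kthread that the wakeup tracer selftest uses as its wakeup target */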
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we now have the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */