trace_selftest.c

/* Include in trace.c */
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
        case TRACE_HW_BRANCHES:
                return 1;
        }
        return 0;
}
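
/*
 * The case list above is the set of entry types these selftests expect to
 * find in the ring buffer; anything else is treated as corruption by
 * trace_test_buffer_cpu() below. If a new TRACE_* type can show up while a
 * startup test is running, it presumably needs to be added here as well.
 */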

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is of size trace_buf_size; if we loop
                 * more times than that, there's something wrong with the
                 * ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}
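
/*
 * Roughly, the per-tracer selftests below all follow the same pattern:
 * initialize the tracer with tracer_init(), generate some activity to
 * trace, call tracing_stop(), verify the buffer contents and entry count
 * with trace_test_buffer(), then reset the tracer and tracing_start()
 * again. A zero return from trace_test_buffer() together with a non-zero
 * entry count is what a passing test expects.
 */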

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
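
/*
 * For context: these selftests are normally wired up through the ->selftest
 * callback of struct tracer, guarded by CONFIG_FTRACE_SELFTEST, and run by
 * register_tracer() when a tracer registers itself. A tracer definition
 * would then look roughly like this (sketch only; the field values follow
 * the usual convention for the function tracer):
 *
 *      static struct tracer function_trace __read_mostly = {
 *              .name           = "function",
 *              .init           = function_trace_init,
 *              .reset          = function_trace_reset,
 *      #ifdef CONFIG_FTRACE_SELFTEST
 *              .selftest       = trace_selftest_startup_function,
 *      #endif
 *      };
 */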

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* the function is passed in as a parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
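
/*
 * A note on DYN_FTRACE_TEST_NAME: it names a small helper that is
 * deliberately defined in a separately compiled file (conventionally
 * trace_selftest_dynamic.c, which keeps -pg while the rest of the tracing
 * code is built without it) so that it gets mcount instrumentation and can
 * be matched by the filter set above; the macro itself is expected to be
 * defined in trace.h.
 */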

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false);
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

 out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
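
/*
 * The latency tracers (irqsoff above, preemptoff and preemptirqsoff below,
 * and the wakeup tracer) snapshot their worst-case trace into the global
 * max_tr array by swapping buffers, which is why those tests check both
 * tr and max_tr, and why trace->stop() is called before tracing_stop().
 */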

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt-off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        tracing_start();
 out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a RT thread, doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the test know we now have an RT prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}
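
/*
 * The wakeup tracer measures the latency between waking the highest
 * priority (RT) task on the system and that task actually running, so the
 * test below needs a real RT thread to wake: it creates one, lets it go to
 * sleep, wakes it, and then expects the resulting wakeup latency to show
 * up in the max_tr buffer.
 */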

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes this is slightly racy. It is possible that, for some
         * strange reason, the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that is already awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case is that we disable tracing. Honestly, if this
         * race does happen something is horribly wrong with the system.
         */
        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
                                   struct trace_array *tr)
{
        struct trace_iterator *iter;
        struct tracer tracer;
        unsigned long count;
        int ret;

        if (!trace->open) {
                printk(KERN_CONT "missing open function...");
                return -1;
        }

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /*
         * The hw-branch tracer needs to collect the trace from the various
         * cpu trace buffers - before tracing is stopped.
         */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        memcpy(&tracer, trace, sizeof(tracer));

        iter->trace = &tracer;
        iter->tr = tr;
        iter->pos = -1;
        mutex_init(&iter->mutex);

        trace->open(iter);

        mutex_destroy(&iter->mutex);
        kfree(iter);

        tracing_stop();

        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT "no entries found..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */