trace_selftest.c
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}
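
/*
 * Consume every event left on one CPU's ring buffer and check that each
 * entry has a type we recognize.  An unknown type means the buffer has
 * been corrupted, so tracing is disabled entirely.
 */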
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
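
/*
 * The __STR()/STR() macros above stringify DYN_FTRACE_TEST_NAME so that
 * the resulting function name can be handed to ftrace_set_filter() below.
 */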

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
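
/*
 * Illustrative sketch (not part of this file): a tracer makes its selftest
 * available by filling in the .selftest member of its struct tracer, which
 * exists only when CONFIG_FTRACE_STARTUP_TEST is enabled.  Roughly:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 * The init/reset callback names above are placeholders; see the individual
 * tracer files (e.g. kernel/trace/trace_functions.c) for the real hookup.
 */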

#ifdef CONFIG_IRQSOFF_TRACER
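/*
 * Verify the irqsoff tracer: disable interrupts briefly with tracing
 * enabled, then check that both the live buffer and the max-latency
 * snapshot (max_tr) contain sane, non-empty traces.
 */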
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
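/*
 * Verify the preemptoff tracer: disable preemption briefly with tracing
 * enabled, then check that both the live buffer and the max-latency
 * snapshot contain sane, non-empty traces.
 */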
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
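/*
 * Verify the preemptirqsoff tracer: run the test twice, once re-enabling
 * preemption before interrupts and once the other way around, checking
 * the live buffer and the max-latency snapshot after each pass.
 */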
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
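/*
 * Helper kthread for the wakeup-latency selftest below: it raises itself
 * to an RT priority, signals the test via the completion, and then sleeps
 * until the test wakes it up and finally stops it.
 */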
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the caller know we now have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
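
/*
 * Verify the wakeup tracer: create an RT kthread, put it to sleep, wake
 * it with tracing enabled, and check that the wakeup latency was recorded
 * in both the live buffer and the max-latency snapshot.
 */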
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the system.
	 */
	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
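/*
 * Verify the context-switch tracer: enable it, sleep briefly so some
 * scheduling activity gets recorded, and check that the buffer is sane
 * and non-empty.
 */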
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
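/*
 * Verify the sysprof tracer: enable it, sleep briefly, and check that the
 * trace buffer is still sane (an empty buffer is not treated as a failure
 * here).
 */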
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
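/*
 * Verify the branch tracer: enable it, sleep briefly, and check that the
 * trace buffer is still sane (as with sysprof, an empty buffer is not a
 * failure).
 */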
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */