trace_selftest.c

/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
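
/*
 * Entry types the selftests expect to find in the buffer; anything
 * else means the trace pages have been corrupted.
 */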
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}
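
/*
 * Walk one CPU's list of trace pages and verify that every entry
 * recorded so far has a known type, and that the page list ends
 * exactly where tr->entries says it should.
 */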
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	check_pages(data);
	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE
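
/*
 * Stringification helpers: STR(DYN_FTRACE_TEST_NAME) expands the
 * macro first and then quotes the resulting symbol name, producing
 * the string handed to ftrace_set_filter() below.
 */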
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
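/*
 * Helper kthread for the wakeup test below: it raises itself to an RT
 * priority, signals that it is ready, then sleeps until the test wakes
 * it so the wakeup latency can be traced.
 */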
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* let the caller know we now run at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that the RT thread
	 * we created did not call schedule for 100ms after doing the
	 * completion, in which case we would wake up a task that is
	 * already awake. But that is extremely unlikely, and the worst
	 * thing that can happen in such a case is that we disable
	 * tracing. Honestly, if this race does happen, something is
	 * horribly wrong with the system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
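
/*
 * Usage sketch (for illustration, not part of this file): a tracer
 * opts into one of the selftests above by filling in the .selftest
 * member of its struct tracer before register_tracer() runs the check.
 * The init/reset/ctrl_update callback names below are hypothetical;
 * only the struct members mirror what this file invokes.
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 *	register_tracer(&function_trace);
 */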