trace_selftest.c

/* Include in trace.c */
#include <linux/kthread.h>
#include <linux/delay.h>
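
/*
 * Entry types that this self-test knows how to validate; anything
 * else found in the buffer is treated as corruption.
 */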
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}
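
/*
 * Walk one CPU's list of trace pages and check that every live entry
 * has a valid type and that the page ring holds exactly tr->entries
 * slots. On any mismatch, tracing is disabled system-wide.
 */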
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE
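
/* Stringify DYN_FTRACE_TEST_NAME so it can be handed to ftrace_set_filter() */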
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
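
/*
 * Verify the irqsoff tracer: disable interrupts for ~100us and check
 * that the latency was captured in both the live and max trace buffers.
 */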
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
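
/*
 * Verify the preemptoff tracer: disable preemption for ~100us and
 * check that both trace buffers recorded entries.
 */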
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
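
/*
 * Verify the combined preempt/irqsoff tracer: run a section with both
 * preemption and interrupts disabled, twice, checking both trace
 * buffers after each pass.
 */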
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
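
/*
 * Helper kthread for the wakeup test: raise itself to an RT priority,
 * signal the tester via the completion, then sleep until woken.
 */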
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Make it known we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
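
/*
 * Verify the wakeup-latency tracer: wake a sleeping RT thread and
 * check that the wakeup was recorded in the max-latency buffer.
 */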
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
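
/*
 * Verify the context-switch tracer: trace for 1/10 of a second and
 * make sure at least one entry was recorded.
 */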
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */