trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

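/*
 * Each defined trace event contributes a struct ftrace_event_call that
 * the linker collects into a single array bounded by the section symbols
 * __start_ftrace_events and __stop_ftrace_events; this macro walks that
 * array.
 */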
#define events_for_each(event)					\
	for (event = __start_ftrace_events;			\
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)

void event_trace_printk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	tracing_record_cmdline(current);
	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
	va_end(ap);
}

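/*
 * Disable all events whose ->enabled flag is set.  Called from
 * ftrace_event_seq_open() when set_event is opened for a truncating
 * write, so the new contents start from a clean slate.
 */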
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		call++;
	}
}

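/*
 * Register or unregister an event's callbacks.  On enable, only the
 * output style selected by call->type is registered (regfunc for printf,
 * raw_reg for raw); on disable, whichever style is active is torn down.
 * Both callers hold event_mutex.
 */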
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		if (call->raw_enabled) {
			call->raw_enabled = 0;
			call->raw_unreg();
		}
		break;
	case 1:
		if (!call->enabled &&
		    (call->type & TRACE_EVENT_TYPE_PRINTF)) {
			call->enabled = 1;
			call->regfunc();
		}
		if (!call->raw_enabled &&
		    (call->type & TRACE_EVENT_TYPE_RAW)) {
			call->raw_enabled = 1;
			call->raw_reg();
		}
		break;
	}
}

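/*
 * Illustrative set_event usage (the event and subsystem names below are
 * examples only; what exists depends on the tracepoints the kernel
 * defines):
 *
 *	echo sched:sched_wakeup > set_event	enable one event
 *	echo sched: > set_event			enable a whole subsystem
 *	echo sched_wakeup > set_event		match by event or subsystem name
 *	echo '!sched:sched_wakeup' > set_event	a leading '!' disables
 */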
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	events_for_each(call) {

		if (!call->name)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

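/*
 * Parse one whitespace-delimited token written to set_event.  A leading
 * '!' means "disable" rather than "enable"; the remainder of the token
 * is handed to ftrace_set_clr_event().
 */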
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	/* cnt is unsigned, so only the zero check is needed */
	if (!cnt)
		return 0;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

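/*
 * seq_file iterators: t_start/t_next walk every event in the section
 * (backing available_events), while s_start/s_next skip events that are
 * not enabled (backing reads of set_event).  Both share t_show, which
 * prints "subsystem:name", or just "name" for events without a
 * TRACE_SYSTEM of their own.
 */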
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next = call;

	(*pos)++;

	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	m->private = ++next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next;

	(*pos)++;

 retry:
	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	if (!call->enabled) {
		call++;
		goto retry;
	}

	next = call;
	m->private = ++next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

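/*
 * Common open routine for available_events and set_event; the seq_ops to
 * use are passed through inode->i_private at file-creation time.  Opening
 * for a write without O_APPEND clears every enabled event first, so
 * "echo event > set_event" replaces the current set rather than adding
 * to it.
 */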
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start_ftrace_events;
	}
	return ret;
}

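/*
 * The per-event 'enable' file: reads report "1" if either the printf or
 * the raw handler is registered and "0" otherwise; writes of 0 or 1
 * disable or enable the event under event_mutex.
 */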
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled || call->raw_enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

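/*
 * The per-event 'type' file selects how the event is recorded: "printf"
 * or "raw".  Writing one style while the other is active switches the
 * registered handlers on the fly.  The file is writable only when the
 * event provides raw_init, i.e. when there is actually a choice.
 */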
static ssize_t
event_type_read(struct file *filp, char __user *ubuf, size_t cnt,
		loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[16];
	int r = 0;

	if (call->type & TRACE_EVENT_TYPE_PRINTF)
		r += sprintf(buf, "printf\n");

	if (call->type & TRACE_EVENT_TYPE_RAW)
		r += sprintf(buf+r, "raw\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
event_type_write(struct file *filp, const char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];

	/*
	 * If there's only one type, we can't change it.
	 * And currently we always have printf type, and we
	 * may or may not have raw type.
	 *
	 * This is a redundant check; the file is created
	 * read-only in that case anyway.
	 */
	if (!call->raw_init)
		return -EPERM;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (!strncmp(buf, "printf", 6) &&
	    (!buf[6] || isspace(buf[6]))) {

		call->type = TRACE_EVENT_TYPE_PRINTF;

		/*
		 * If raw is enabled, then disable it and enable
		 * the printf type instead.
		 */
		if (call->raw_enabled) {
			call->raw_enabled = 0;
			call->raw_unreg();

			call->enabled = 1;
			call->regfunc();
		}

	} else if (!strncmp(buf, "raw", 3) &&
		   (!buf[3] || isspace(buf[3]))) {

		call->type = TRACE_EVENT_TYPE_RAW;

		/*
		 * If printf is enabled, then disable it and enable
		 * the raw type instead.
		 */
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();

			call->raw_enabled = 1;
			call->raw_reg();
		}
	} else
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

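/*
 * The per-event 'available_types' file lists the output styles the event
 * supports: "printf" is always available, "raw" only when the event
 * provides raw_init.
 */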
static ssize_t
event_available_types_read(struct file *filp, char __user *ubuf, size_t cnt,
			   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[16];
	int r = 0;

	r += sprintf(buf, "printf\n");

	if (call->raw_init)
		r += sprintf(buf+r, "raw\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_type_fops = {
	.open = tracing_open_generic,
	.read = event_type_read,
	.write = event_type_write,
};

static const struct file_operations ftrace_available_types_fops = {
	.open = tracing_open_generic,
	.read = event_available_types_read,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

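/*
 * Events that declare a subsystem have their directories grouped under
 * events/<subsystem>/.  Subsystems seen so far are kept on the
 * event_subsystems list so each directory is created only once; on
 * failure the caller simply falls back to the top-level events directory.
 */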
struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
};

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = name;
	list_add(&system->list, &event_subsystems);

	return system->entry;
}

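/*
 * Create the debugfs directory for one event, events/[<subsystem>/]<name>,
 * holding 'enable', 'type' and 'available_types'.  An event's raw_init()
 * is called here if it has one, and 'type' is made writable only for
 * such events.
 */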
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, "TRACE_SYSTEM") != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	/* default the output to printf */
	call->type = TRACE_EVENT_TYPE_PRINTF;

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	entry = debugfs_create_file("enable", 0644, call->dir, call,
				    &ftrace_enable_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/enable' entry\n", call->name);

	/* Only let type be writable if we can change it */
	entry = debugfs_create_file("type",
				    call->raw_init ? 0644 : 0444,
				    call->dir, call,
				    &ftrace_type_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/type' entry\n", call->name);

	entry = debugfs_create_file("available_types", 0444, call->dir, call,
				    &ftrace_available_types_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/available_types' entry\n", call->name);

	return 0;
}

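/*
 * Boot-time setup.  With debugfs mounted (commonly /sys/kernel/debug,
 * though the mount point can differ), the files created here end up as:
 *
 *	tracing/available_events
 *	tracing/set_event
 *	tracing/events/[<subsystem>/]<event>/{enable,type,available_types}
 *
 * Nothing is created if the tracing directory itself is unavailable.
 */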
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	events_for_each(call) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		event_create_dir(call, d_events);
	}

	return 0;
}

fs_initcall(event_trace_init);