trace_uprobe.c

/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

/*
 * uprobe event core functions
 */
struct trace_uprobe;

struct uprobe_trace_consumer {
	struct uprobe_consumer		cons;
	struct trace_uprobe		*tu;
};

struct trace_uprobe {
	struct list_head		list;
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct uprobe_trace_consumer	*consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	unsigned int			flags;	/* For TP_FLAG_* */
	ssize_t				size;	/* trace entry size */
	unsigned int			nr_args;
	struct probe_arg		args[];
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, args) +		\
	(sizeof(struct probe_arg) * (n)))
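
/*
 * Illustrative sizing, not taken from the original comments: a probe parsed
 * with two fetch arguments is allocated as
 *
 *	kzalloc(SIZEOF_TRACE_UPROBE(2), GFP_KERNEL)
 *	  == kzalloc(offsetof(struct trace_uprobe, args)
 *		     + 2 * sizeof(struct probe_arg), GFP_KERNEL)
 *
 * i.e. the fixed header followed by storage for the flexible args[] array.
 */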

static int register_uprobe_event(struct trace_uprobe *tu);
static void unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->call.class = &tu->class;
	tu->call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->call.name)
		goto error;

	tu->class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	return tu;

error:
	kfree(tu->call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->nr_args; i++)
		traceprobe_free_probe_arg(&tu->args[i]);

	iput(tu->inode);
	kfree(tu->call.class->system);
	kfree(tu->call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(tu->call.name, event) == 0 &&
		    strcmp(tu->call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: call with uprobe_lock held */
static void unregister_trace_uprobe(struct trace_uprobe *tu)
{
	list_del(&tu->list);
	unregister_uprobe_event(tu);
	free_trace_uprobe(tu);
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tp;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tp = find_probe_event(tu->call.name, tu->call.class->system);
	if (old_tp)
		/* delete old event */
		unregister_trace_uprobe(old_tp);

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p[:[GRP/]EVENT] PATH:SYMBOL[+offs] [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
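
/*
 * Example usage, with a hypothetical binary, event name and probe offset
 * (the offset is the location of the probed instruction within PATH, %ax is
 * an x86 register fetch argument, and the group defaults to "uprobes" when
 * it is omitted):
 *
 *	echo 'p:bash_probe /bin/bash:0x4245c0 %ax' > uprobe_events
 *	echo '-:bash_probe' > uprobe_events
 */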
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must be started with 'p' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');
		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);
		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return 0;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg)
		goto fail_address_parse;

	*arg++ = '\0';
	filename = argv[1];

	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(path.dentry->d_inode);
	/* drop the path reference from kern_path(); the inode ref is enough */
	path_put(&path);

	ret = strict_strtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail = strrchr(filename, '/');
		char *ptr;

		ptr = kstrdup((tail ? tail + 1 : filename), GFP_KERNEL);
		if (!ptr) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		tail = ptr;
		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tu->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tu->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tu->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tu->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tu->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, tu->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tu->args[i].name, tu->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->size, &tu->args[i], false, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	if (inode)
		iput(inode);

	pr_info("Failed to parse address.\n");

	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_uprobe *tu;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		unregister_trace_uprobe(tu);
	}
	mutex_unlock(&uprobe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	int i;

	seq_printf(m, "p:%s/%s", tu->call.class->system, tu->call.name);
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->nr_args; i++)
		seq_printf(m, " %s=%s", tu->args[i].name, tu->args[i].comm);

	seq_printf(m, "\n");
	return 0;
}
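
/*
 * With the illustrative probe from the usage example above, a line in the
 * listing produced here would look roughly like:
 *
 *	p:uprobes/bash_probe /bin/bash:0x00000000004245c0 arg1=%ax
 */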

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->call.name, tu->nhit);
	return 0;
}
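
/*
 * Each profile line pairs the probed file and event name with the hit
 * counter (tu->nhit), e.g. (illustrative values):
 *
 *	 /bin/bash bash_probe                                          127
 */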

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* uprobe handler */
static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	u8 *data;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tu->call;

	tu->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = sizeof(*entry) + tu->size;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
	data = (u8 *)&entry[1];
	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	field = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, call.event);

	if (!trace_seq_printf(s, "%s: (", tu->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tu->nr_args; i++) {
		if (!tu->args[i].type->print(s, tu->args[i].name,
					     data + tu->args[i].offset, field))
			goto partial;
	}

	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
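
/*
 * probe_event_enable()/probe_event_disable() attach and detach the actual
 * uprobe: enabling allocates a uprobe_trace_consumer whose handler is
 * uprobe_dispatcher() and registers it at (tu->inode, tu->offset), while
 * the TP_FLAG_TRACE/TP_FLAG_PROFILE bit in tu->flags records whether the
 * ftrace or the perf side requested it.
 */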
static int probe_event_enable(struct trace_uprobe *tu, int flag)
{
	struct uprobe_trace_consumer *utc;
	int ret = 0;

	if (!tu->inode || tu->consumer)
		return -EINTR;

	utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL);
	if (!utc)
		return -EINTR;

	utc->cons.handler = uprobe_dispatcher;
	utc->cons.filter = NULL;
	ret = uprobe_register(tu->inode, tu->offset, &utc->cons);
	if (ret) {
		kfree(utc);
		return ret;
	}

	tu->flags |= flag;
	utc->tu = tu;
	tu->consumer = utc;

	return 0;
}

static void probe_event_disable(struct trace_uprobe *tu, int flag)
{
	if (!tu->inode || !tu->consumer)
		return;

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons);
	tu->flags &= ~flag;
	kfree(tu->consumer);
	tu->consumer = NULL;
}

static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tu->nr_args; i++) {
		ret = trace_define_field(event_call, tu->args[i].type->fmttype,
					 tu->args[i].name,
					 sizeof(field) + tu->args[i].offset,
					 tu->args[i].type->size,
					 tu->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#define LEN_OR_ZERO	(len ? len - pos : 0)
static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len)
{
	const char *fmt, *arg;
	int i;
	int pos = 0;

	fmt = "(%lx)";
	arg = "REC->" FIELD_STRING_IP;

	/* When len=0, we just calculate the needed length */

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tu->args[i].name, tu->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tu->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
				tu->args[i].name);
	}

	return pos;	/* return the length of print_fmt */
}
#undef LEN_OR_ZERO
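
/*
 * For a probe with two arguments named arg1 and arg2 (illustrative), the
 * format built above has the shape
 *
 *	"(%lx) arg1=<fmt> arg2=<fmt>", REC-><FIELD_STRING_IP>, REC->arg1, REC->arg2
 *
 * i.e. the probed instruction pointer followed by one name=value pair per
 * fetch argument, each printed with its fetch type's format string.
 */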

static int set_print_fmt(struct trace_uprobe *tu)
{
	char *print_fmt;
	int len;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tu, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tu, print_fmt, len + 1);
	tu->call.print_fmt = print_fmt;

	return 0;
}

#ifdef CONFIG_PERF_EVENTS
/* uprobe profile handler */
static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tu->call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	u8 *data;
	int size, __size, i;
	int rctx;

	__size = sizeof(*entry) + tu->size;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
	data = (u8 *)&entry[1];
	for (i = 0; i < tu->nr_args; i++)
		call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);

out:
	preempt_enable();
}
#endif	/* CONFIG_PERF_EVENTS */
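
/*
 * trace_uprobe_register() is the ->reg callback wired up in
 * register_uprobe_event(): the tracing and perf cores call it to switch the
 * probe on and off, and it maps each request onto probe_event_enable()/
 * probe_event_disable() with the matching TP_FLAG_* bit.
 */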
static
int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
{
	struct trace_uprobe *tu = (struct trace_uprobe *)event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, TP_FLAG_TRACE);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, TP_FLAG_PROFILE);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, TP_FLAG_PROFILE);
		return 0;
#endif
	default:
		return 0;
	}
	return 0;
}
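
/*
 * uprobe_dispatcher() is the uprobe_consumer handler installed by
 * probe_event_enable(); it runs when the probed instruction is hit and
 * fans the event out to the ftrace and/or perf handlers according to
 * tu->flags.
 */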
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct uprobe_trace_consumer *utc;
	struct trace_uprobe *tu;

	utc = container_of(con, struct uprobe_trace_consumer, cons);
	tu = utc->tu;
	if (!tu || tu->consumer != utc)
		return 0;

	if (tu->flags & TP_FLAG_TRACE)
		uprobe_trace_func(tu, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tu->flags & TP_FLAG_PROFILE)
		uprobe_perf_func(tu, regs);
#endif
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};
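
/*
 * register_uprobe_event() exposes the probe to the trace event core: it sets
 * up the field definitions and print format, installs trace_uprobe_register()
 * as the ->reg callback and adds the event with trace_add_event_call(), which
 * creates the usual per-event control files under the events directory.
 */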
static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct ftrace_event_call *call = &tu->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(tu) < 0)
		return -ENOMEM;

	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}

	return ret;
}

static void unregister_uprobe_event(struct trace_uprobe *tu)
{
	/* tu->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tu->call);
	kfree(tu->call.print_fmt);
	tu->call.print_fmt = NULL;
}

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
			  NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
			  NULL, &uprobe_profile_ops);

	return 0;
}

fs_initcall(init_uprobe_trace);