trace_kprobe.c

/*
 * kprobe based kernel tracer
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_counter.h>

#include "trace.h"
#include "trace_output.h"

#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"

/* currently, trace_kprobe only supports X86. */
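/*
 * Each probe argument is fetched by a small handler paired with a bit of
 * private data; call_fetch() below runs one such handler against the
 * registers saved at the probe hit and returns the value to record.
 */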
struct fetch_func {
        unsigned long (*func)(struct pt_regs *, void *);
        void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
                                          struct pt_regs *regs)
{
        return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
                                              void *offset)
{
        return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
                                           void *num)
{
        return regs_get_kernel_stack_nth(regs,
                                         (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
        unsigned long retval;

        if (probe_kernel_address(addr, retval))
                return 0;
        return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
        return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
                                              void *dummy)
{
        return regs_return_value(regs);
}

static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy)
{
        return instruction_pointer(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
                                                   void *dummy)
{
        return kernel_stack_pointer(regs);
}

/* Memory fetching by symbol */
struct symbol_cache {
        char *symbol;
        long offset;
        unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
        sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
        if (sc->addr)
                sc->addr += sc->offset;
        return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
        kfree(sc->symbol);
        kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
        struct symbol_cache *sc;

        if (!sym || strlen(sym) == 0)
                return NULL;
        sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->symbol = kstrdup(sym, GFP_KERNEL);
        if (!sc->symbol) {
                kfree(sc);
                return NULL;
        }
        sc->offset = offset;

        update_symbol_cache(sc);
        return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
        struct symbol_cache *sc = data;

        if (sc->addr)
                return fetch_memory(regs, (void *)sc->addr);
        else
                return 0;
}

/* Special indirect memory access interface */
struct indirect_fetch_data {
        struct fetch_func orig;
        long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
        struct indirect_fetch_data *ind = data;
        unsigned long addr;

        addr = call_fetch(&ind->orig, regs);
        if (addr) {
                addr += ind->offset;
                return fetch_memory(regs, (void *)addr);
        } else
                return 0;
}

static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
        if (data->orig.func == fetch_indirect)
                free_indirect_fetch_data(data->orig.data);
        else if (data->orig.func == fetch_symbol)
                free_symbol_cache(data->orig.data);
        kfree(data);
}

/**
 * Kprobe tracer core functions
 */
struct probe_arg {
        struct fetch_func fetch;
        const char *name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE 1
#define TP_FLAG_PROFILE 2

struct trace_probe {
        struct list_head list;
        struct kretprobe rp;    /* Use rp.kp for kprobe use */
        unsigned long nhit;
        unsigned int flags;     /* For TP_FLAG_* */
        const char *symbol;     /* symbol name */
        struct ftrace_event_call call;
        struct trace_event event;
        unsigned int nr_args;
        struct probe_arg args[];
};
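/*
 * A trace_probe is allocated with its flexible args[] array inline;
 * SIZEOF_TRACE_PROBE() gives the allocation size for a probe carrying
 * n arguments.
 */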
#define SIZEOF_TRACE_PROBE(n)                   \
        (offsetof(struct trace_probe, args) +   \
        (sizeof(struct probe_arg) * (n)))

static __kprobes int probe_is_return(struct trace_probe *tp)
{
        return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
        return tp->symbol ? tp->symbol : "unknown";
}
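/*
 * Turn a fetch_func back into the textual form ("a0", "%ax", "@symbol+4",
 * "+8(a0)", ...) that defined it, for the kprobe_events listing.
 */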
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
        int ret = -EINVAL;

        if (ff->func == fetch_argument)
                ret = snprintf(buf, n, "a%lu", (unsigned long)ff->data);
        else if (ff->func == fetch_register) {
                const char *name;
                name = regs_query_register_name((unsigned int)((long)ff->data));
                ret = snprintf(buf, n, "%%%s", name);
        } else if (ff->func == fetch_stack)
                ret = snprintf(buf, n, "s%lu", (unsigned long)ff->data);
        else if (ff->func == fetch_memory)
                ret = snprintf(buf, n, "@0x%p", ff->data);
        else if (ff->func == fetch_symbol) {
                struct symbol_cache *sc = ff->data;
                ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset);
        } else if (ff->func == fetch_retvalue)
                ret = snprintf(buf, n, "rv");
        else if (ff->func == fetch_ip)
                ret = snprintf(buf, n, "ra");
        else if (ff->func == fetch_stack_address)
                ret = snprintf(buf, n, "sa");
        else if (ff->func == fetch_indirect) {
                struct indirect_fetch_data *id = ff->data;
                size_t l = 0;
                ret = snprintf(buf, n, "%+ld(", id->offset);
                if (ret >= n)
                        goto end;
                l += ret;
                ret = probe_arg_string(buf + l, n - l, &id->orig);
                if (ret < 0)
                        goto end;
                l += ret;
                ret = snprintf(buf + l, n - l, ")");
                ret += l;
        }
end:
        if (ret >= n)
                return -ENOSPC;
        return ret;
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int nargs, int is_return)
{
        struct trace_probe *tp;

        tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOMEM);

        if (symbol) {
                tp->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tp->symbol)
                        goto error;
                tp->rp.kp.symbol_name = tp->symbol;
                tp->rp.kp.offset = offs;
        } else
                tp->rp.kp.addr = addr;

        if (is_return)
                tp->rp.handler = kretprobe_dispatcher;
        else
                tp->rp.kp.pre_handler = kprobe_dispatcher;

        if (!event)
                goto error;
        tp->call.name = kstrdup(event, GFP_KERNEL);
        if (!tp->call.name)
                goto error;

        if (!group)
                goto error;
        tp->call.system = kstrdup(group, GFP_KERNEL);
        if (!tp->call.system)
                goto error;

        INIT_LIST_HEAD(&tp->list);
        return tp;
error:
        kfree(tp->call.name);
        kfree(tp->symbol);
        kfree(tp);
        return ERR_PTR(-ENOMEM);
}

static void free_probe_arg(struct probe_arg *arg)
{
        if (arg->fetch.func == fetch_symbol)
                free_symbol_cache(arg->fetch.data);
        else if (arg->fetch.func == fetch_indirect)
                free_indirect_fetch_data(arg->fetch.data);
        kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
        int i;

        for (i = 0; i < tp->nr_args; i++)
                free_probe_arg(&tp->args[i]);

        kfree(tp->call.system);
        kfree(tp->call.name);
        kfree(tp->symbol);
        kfree(tp);
}
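/* Look up a probe event by name; callers must hold probe_lock. */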
static struct trace_probe *find_probe_event(const char *event)
{
        struct trace_probe *tp;

        list_for_each_entry(tp, &probe_list, list)
                if (!strcmp(tp->call.name, event))
                        return tp;
        return NULL;
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
{
        if (probe_is_return(tp))
                unregister_kretprobe(&tp->rp);
        else
                unregister_kprobe(&tp->rp.kp);
        list_del(&tp->list);
        unregister_probe_event(tp);
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
        struct trace_probe *old_tp;
        int ret;

        mutex_lock(&probe_lock);

        /* register as an event */
        old_tp = find_probe_event(tp->call.name);
        if (old_tp) {
                /* delete old event */
                unregister_trace_probe(old_tp);
                free_trace_probe(old_tp);
        }
        ret = register_probe_event(tp);
        if (ret) {
                pr_warning("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        tp->flags = TP_FLAG_TRACE;
        if (probe_is_return(tp))
                ret = register_kretprobe(&tp->rp);
        else
                ret = register_kprobe(&tp->rp.kp);

        if (ret) {
                pr_warning("Could not insert probe(%d)\n", ret);
                if (ret == -EILSEQ) {
                        pr_warning("Probing address(0x%p) is not an "
                                   "instruction boundary.\n",
                                   tp->rp.kp.addr);
                        ret = -EINVAL;
                }
                unregister_probe_event(tp);
        } else
                list_add_tail(&tp->list, &probe_list);
end:
        mutex_unlock(&probe_lock);
        return ret;
}
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
        char *tmp;
        int ret;

        if (!offset)
                return -EINVAL;

        tmp = strchr(symbol, '+');
        if (tmp) {
                /* skip sign because strict_strtol doesn't accept '+' */
                ret = strict_strtoul(tmp + 1, 0, offset);
                if (ret)
                        return ret;
                *tmp = '\0';
        } else
                *offset = 0;

        return 0;
}

#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
        int ret = 0;
        unsigned long param;
        long offset;
        char *tmp;

        switch (arg[0]) {
        case 'a':       /* argument */
                ret = strict_strtoul(arg + 1, 10, &param);
                if (ret || param > PARAM_MAX_ARGS)
                        ret = -EINVAL;
                else {
                        ff->func = fetch_argument;
                        ff->data = (void *)param;
                }
                break;
        case 'r':       /* retval or retaddr */
                if (is_return && arg[1] == 'v') {
                        ff->func = fetch_retvalue;
                        ff->data = NULL;
                } else if (is_return && arg[1] == 'a') {
                        ff->func = fetch_ip;
                        ff->data = NULL;
                } else
                        ret = -EINVAL;
                break;
        case '%':       /* named register */
                ret = regs_query_register_offset(arg + 1);
                if (ret >= 0) {
                        ff->func = fetch_register;
                        ff->data = (void *)(unsigned long)ret;
                        ret = 0;
                }
                break;
        case 's':       /* stack */
                if (arg[1] == 'a') {
                        ff->func = fetch_stack_address;
                        ff->data = NULL;
                } else {
                        ret = strict_strtoul(arg + 1, 10, &param);
                        if (ret || param > PARAM_MAX_STACK)
                                ret = -EINVAL;
                        else {
                                ff->func = fetch_stack;
                                ff->data = (void *)param;
                        }
                }
                break;
        case '@':       /* memory or symbol */
                if (isdigit(arg[1])) {
                        ret = strict_strtoul(arg + 1, 0, &param);
                        if (ret)
                                break;
                        ff->func = fetch_memory;
                        ff->data = (void *)param;
                } else {
                        ret = split_symbol_offset(arg + 1, &offset);
                        if (ret)
                                break;
                        ff->data = alloc_symbol_cache(arg + 1, offset);
                        if (ff->data)
                                ff->func = fetch_symbol;
                        else
                                ret = -EINVAL;
                }
                break;
        case '+':       /* indirect memory */
        case '-':
                tmp = strchr(arg, '(');
                if (!tmp) {
                        ret = -EINVAL;
                        break;
                }
                *tmp = '\0';
                ret = strict_strtol(arg + 1, 0, &offset);
                if (ret)
                        break;
                if (arg[0] == '-')
                        offset = -offset;
                arg = tmp + 1;
                tmp = strrchr(arg, ')');
                if (tmp) {
                        struct indirect_fetch_data *id;
                        *tmp = '\0';
                        id = kzalloc(sizeof(struct indirect_fetch_data),
                                     GFP_KERNEL);
                        if (!id)
                                return -ENOMEM;
                        id->offset = offset;
                        ret = parse_probe_arg(arg, &id->orig, is_return);
                        if (ret)
                                kfree(id);
                        else {
                                ff->func = fetch_indirect;
                                ff->data = (void *)id;
                        }
                } else
                        ret = -EINVAL;
                break;
        default:
                /* TODO: support custom handler */
                ret = -EINVAL;
        }
        return ret;
}

static int create_trace_probe(int argc, char **argv)
{
        /*
         * Argument syntax:
         *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  aN  : fetch Nth of function argument. (N:0-)
         *  rv  : fetch return value
         *  ra  : fetch return address
         *  sa  : fetch stack address
         *  sN  : fetch Nth of stack (N:0-)
         *  @ADDR : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG : fetch register REG
         * Indirect memory fetch:
         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         */
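        /*
         * For example (illustrative), writing
         *   p:myprobe do_sys_open a0 a1 a2 a3
         * to <debugfs>/tracing/kprobe_events defines an event
         * kprobes/myprobe on do_sys_open that records the first four
         * arguments of each call.
         */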
        struct trace_probe *tp;
        int i, ret = 0;
        int is_return = 0;
        char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];

        if (argc < 2)
                return -EINVAL;

        if (argv[0][0] == 'p')
                is_return = 0;
        else if (argv[0][0] == 'r')
                is_return = 1;
        else
                return -EINVAL;

        if (argv[0][1] == ':') {
                event = &argv[0][2];
                if (strchr(event, '/')) {
                        group = event;
                        event = strchr(group, '/') + 1;
                        event[-1] = '\0';
                        if (strlen(group) == 0) {
                                pr_info("Group name is not specified\n");
                                return -EINVAL;
                        }
                }
                if (strlen(event) == 0) {
                        pr_info("Event name is not specified\n");
                        return -EINVAL;
                }
        }

        if (isdigit(argv[1][0])) {
                if (is_return)
                        return -EINVAL;
                /* an address specified */
                ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);
                if (ret)
                        return ret;
        } else {
                /* a symbol specified */
                symbol = argv[1];
                /* TODO: support .init module functions */
                ret = split_symbol_offset(symbol, &offset);
                if (ret)
                        return ret;
                if (offset && is_return)
                        return -EINVAL;
        }
        argc -= 2; argv += 2;

        /* setup a probe */
        if (!group)
                group = KPROBE_EVENT_SYSTEM;
        if (!event) {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
                                 is_return ? 'r' : 'p', addr);
                event = buf;
        }
        tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
                               is_return);
        if (IS_ERR(tp))
                return PTR_ERR(tp);

        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                /* Parse argument name */
                arg = strchr(argv[i], '=');
                if (arg)
                        *arg++ = '\0';
                else
                        arg = argv[i];
                tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);

                /* Parse fetch argument */
                if (strlen(arg) > MAX_ARGSTR_LEN) {
                        pr_info("Argument%d(%s) is too long.\n", i, arg);
                        ret = -ENOSPC;
                        goto error;
                }
                ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
                if (ret)
                        goto error;
        }
        tp->nr_args = i;

        ret = register_trace_probe(tp);
        if (ret)
                goto error;
        return 0;

error:
        free_trace_probe(tp);
        return ret;
}
static void cleanup_all_probes(void)
{
        struct trace_probe *tp;

        mutex_lock(&probe_lock);
        /* TODO: Use batch unregistration */
        while (!list_empty(&probe_list)) {
                tp = list_entry(probe_list.next, struct trace_probe, list);
                unregister_trace_probe(tp);
                free_trace_probe(tp);
        }
        mutex_unlock(&probe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&probe_lock);
        return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct trace_probe *tp = v;
        int i, ret;
        char buf[MAX_ARGSTR_LEN + 1];

        seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
        seq_printf(m, ":%s", tp->call.name);

        if (tp->symbol)
                seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
        else
                seq_printf(m, " 0x%p", tp->rp.kp.addr);

        for (i = 0; i < tp->nr_args; i++) {
                ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
                if (ret < 0) {
                        pr_warning("Argument%d decoding error(%d).\n", i, ret);
                        return ret;
                }
                seq_printf(m, " %s=%s", tp->args[i].name, buf);
        }
        seq_printf(m, "\n");
        return 0;
}

static const struct seq_operations probes_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_seq_show
};
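/*
 * Opening kprobe_events for writing with O_TRUNC (e.g. shell '>'
 * redirection) clears every existing probe before new definitions
 * are parsed.
 */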
static int probes_open(struct inode *inode, struct file *file)
{
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                cleanup_all_probes();

        return seq_open(file, &probes_seq_op);
}

static int command_trace_probe(const char *buf)
{
        char **argv;
        int argc = 0, ret = 0;

        argv = argv_split(GFP_KERNEL, buf, &argc);
        if (!argv)
                return -ENOMEM;

        if (argc)
                ret = create_trace_probe(argc, argv);

        argv_free(argv);
        return ret;
}
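/*
 * probes_write() copies user input in WRITE_BUFSIZE chunks and hands each
 * newline-terminated line to command_trace_probe(); anything after a '#'
 * on a line is treated as a comment and ignored.
 */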
#define WRITE_BUFSIZE 128

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        char *kbuf, *tmp;
        int ret;
        size_t done;
        size_t size;

        kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = done = 0;
        while (done < count) {
                size = count - done;
                if (size >= WRITE_BUFSIZE)
                        size = WRITE_BUFSIZE - 1;
                if (copy_from_user(kbuf, buffer + done, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                kbuf[size] = '\0';
                tmp = strchr(kbuf, '\n');
                if (tmp) {
                        *tmp = '\0';
                        size = tmp - kbuf + 1;
                } else if (done + size < count) {
                        pr_warning("Line length is too long: "
                                   "Should be less than %d.", WRITE_BUFSIZE);
                        ret = -EINVAL;
                        goto out;
                }
                done += size;
                /* Remove comments */
                tmp = strchr(kbuf, '#');
                if (tmp)
                        *tmp = '\0';

                ret = command_trace_probe(kbuf);
                if (ret)
                        goto out;
        }
        ret = done;
out:
        kfree(kbuf);
        return ret;
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct trace_probe *tp = v;

        seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
                   tp->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe handler */
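/*
 * The trace handlers below reserve one entry in the ftrace ring buffer per
 * probe hit and fill it by running the fetch function of each argument
 * against the saved registers.
 */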
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct kprobe_trace_entry *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, i, pc;
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;

        tp->nhit++;

        local_save_flags(irq_flags);
        pc = preempt_count();

        size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

        event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
                                                  irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->nargs = tp->nr_args;
        entry->ip = (unsigned long)kp->addr;
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

        if (!filter_current_check_discard(buffer, call, entry, event))
                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
        return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
                                          struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct kretprobe_trace_entry *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, i, pc;
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;

        local_save_flags(irq_flags);
        pc = preempt_count();

        size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

        event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
                                                  irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->nargs = tp->nr_args;
        entry->func = (unsigned long)tp->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

        if (!filter_current_check_discard(buffer, call, entry, event))
                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

        return 0;
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
        struct kprobe_trace_entry *field;
        struct trace_seq *s = &iter->seq;
        struct trace_event *event;
        struct trace_probe *tp;
        int i;

        field = (struct kprobe_trace_entry *)iter->ent;
        event = ftrace_find_event(field->ent.type);
        tp = container_of(event, struct trace_probe, event);

        if (!trace_seq_printf(s, "%s: (", tp->call.name))
                goto partial;

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto partial;

        if (!trace_seq_puts(s, ")"))
                goto partial;

        for (i = 0; i < field->nargs; i++)
                if (!trace_seq_printf(s, " %s=%lx",
                                      tp->args[i].name, field->args[i]))
                        goto partial;

        if (!trace_seq_puts(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
        struct kretprobe_trace_entry *field;
        struct trace_seq *s = &iter->seq;
        struct trace_event *event;
        struct trace_probe *tp;
        int i;

        field = (struct kretprobe_trace_entry *)iter->ent;
        event = ftrace_find_event(field->ent.type);
        tp = container_of(event, struct trace_probe, event);

        if (!trace_seq_printf(s, "%s: (", tp->call.name))
                goto partial;

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto partial;

        if (!trace_seq_puts(s, " <- "))
                goto partial;

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto partial;

        if (!trace_seq_puts(s, ")"))
                goto partial;

        for (i = 0; i < field->nargs; i++)
                if (!trace_seq_printf(s, " %s=%lx",
                                      tp->args[i].name, field->args[i]))
                        goto partial;

        if (!trace_seq_puts(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static int probe_event_enable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        tp->flags |= TP_FLAG_TRACE;
        if (probe_is_return(tp))
                return enable_kretprobe(&tp->rp);
        else
                return enable_kprobe(&tp->rp.kp);
}

static void probe_event_disable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        tp->flags &= ~TP_FLAG_TRACE;
        if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
                if (probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
}

static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
        INIT_LIST_HEAD(&event_call->fields);

        return 0;
}
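/*
 * DEFINE_FIELD registers one member of the entry structure as a named,
 * filterable field of the ftrace event.
 */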
#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)                       \
        do {                                                            \
                ret = trace_define_field(event_call, #type, name,       \
                                         offsetof(typeof(field), item), \
                                         sizeof(field.item), is_signed, \
                                         FILTER_OTHER);                 \
                if (ret)                                                \
                        return ret;                                     \
        } while (0)

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
        int ret, i;
        struct kprobe_trace_entry field;
        struct trace_probe *tp = (struct trace_probe *)event_call->data;

        ret = trace_define_common_fields(event_call);
        if (ret)
                return ret;

        DEFINE_FIELD(unsigned long, ip, "ip", 0);
        DEFINE_FIELD(int, nargs, "nargs", 1);
        /* Set argument names as fields */
        for (i = 0; i < tp->nr_args; i++)
                DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
        return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
        int ret, i;
        struct kretprobe_trace_entry field;
        struct trace_probe *tp = (struct trace_probe *)event_call->data;

        ret = trace_define_common_fields(event_call);
        if (ret)
                return ret;

        DEFINE_FIELD(unsigned long, func, "func", 0);
        DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0);
        DEFINE_FIELD(int, nargs, "nargs", 1);
        /* Set argument names as fields */
        for (i = 0; i < tp->nr_args; i++)
                DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
        return 0;
}
static int __probe_event_show_format(struct trace_seq *s,
                                     struct trace_probe *tp, const char *fmt,
                                     const char *arg)
{
        int i;

        /* Show format */
        if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
                return 0;

        for (i = 0; i < tp->nr_args; i++)
                if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
                        return 0;

        if (!trace_seq_printf(s, "\", %s", arg))
                return 0;

        for (i = 0; i < tp->nr_args; i++)
                if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
                        return 0;

        return trace_seq_puts(s, "\n");
}
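/*
 * SHOW_FIELD emits one "field:" line of the event's format file,
 * describing the offset and size of a member of the entry structure.
 */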
#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)                                    \
        do {                                                            \
                ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"    \
                                "offset:%u;\tsize:%u;\n", name,         \
                                (unsigned int)offsetof(typeof(field), item),\
                                (unsigned int)sizeof(type));            \
                if (!ret)                                               \
                        return 0;                                       \
        } while (0)

static int kprobe_event_show_format(struct ftrace_event_call *call,
                                    struct trace_seq *s)
{
        struct kprobe_trace_entry field __attribute__((unused));
        int ret, i;
        struct trace_probe *tp = (struct trace_probe *)call->data;

        SHOW_FIELD(unsigned long, ip, "ip");
        SHOW_FIELD(int, nargs, "nargs");

        /* Show fields */
        for (i = 0; i < tp->nr_args; i++)
                SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
        trace_seq_puts(s, "\n");

        return __probe_event_show_format(s, tp, "(%lx)", "REC->ip");
}

static int kretprobe_event_show_format(struct ftrace_event_call *call,
                                       struct trace_seq *s)
{
        struct kretprobe_trace_entry field __attribute__((unused));
        int ret, i;
        struct trace_probe *tp = (struct trace_probe *)call->data;

        SHOW_FIELD(unsigned long, func, "func");
        SHOW_FIELD(unsigned long, ret_ip, "ret_ip");
        SHOW_FIELD(int, nargs, "nargs");

        /* Show fields */
        for (i = 0; i < tp->nr_args; i++)
                SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
        trace_seq_puts(s, "\n");

        return __probe_event_show_format(s, tp, "(%lx <- %lx)",
                                         "REC->func, REC->ret_ip");
}

#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
                                         struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry *entry;
        int size, __size, i, pc;
        unsigned long irq_flags;

        local_save_flags(irq_flags);
        pc = preempt_count();

        __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
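        /*
         * The record is built on the stack below; the size is rounded so
         * that, together with the u32 size word perf prepends, the buffer
         * stays u64-aligned.
         */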
        do {
                char raw_data[size];
                struct trace_entry *ent;
                /*
                 * Zero dead bytes from alignment to avoid stack leak
                 * to userspace
                 */
                *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
                entry = (struct kprobe_trace_entry *)raw_data;
                ent = &entry->ent;

                tracing_generic_entry_update(ent, irq_flags, pc);
                ent->type = call->id;
                entry->nargs = tp->nr_args;
                entry->ip = (unsigned long)kp->addr;
                for (i = 0; i < tp->nr_args; i++)
                        entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
                perf_tpcounter_event(call->id, entry->ip, 1, entry, size);
        } while (0);
        return 0;
}

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
                                            struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry *entry;
        int size, __size, i, pc;
        unsigned long irq_flags;

        local_save_flags(irq_flags);
        pc = preempt_count();

        __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        do {
                char raw_data[size];
                struct trace_entry *ent;

                *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
                entry = (struct kretprobe_trace_entry *)raw_data;
                ent = &entry->ent;

                tracing_generic_entry_update(ent, irq_flags, pc);
                ent->type = call->id;
                entry->nargs = tp->nr_args;
                entry->func = (unsigned long)tp->rp.kp.addr;
                entry->ret_ip = (unsigned long)ri->ret_addr;
                for (i = 0; i < tp->nr_args; i++)
                        entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
                perf_tpcounter_event(call->id, entry->ret_ip, 1, entry, size);
        } while (0);
        return 0;
}

static int probe_profile_enable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        if (atomic_inc_return(&call->profile_count))
                return 0;

        tp->flags |= TP_FLAG_PROFILE;
        if (probe_is_return(tp))
                return enable_kretprobe(&tp->rp);
        else
                return enable_kprobe(&tp->rp.kp);
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        if (atomic_add_negative(-1, &call->profile_count))
                tp->flags &= ~TP_FLAG_PROFILE;

        if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
                if (probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
}
#endif  /* CONFIG_EVENT_PROFILE */
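/*
 * The dispatchers registered with the kprobe/kretprobe are the single
 * entry points per hit; they fan out to the tracing and/or profiling
 * handlers according to the flags set on the trace_probe.
 */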
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

        if (tp->flags & TP_FLAG_TRACE)
                kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
        if (tp->flags & TP_FLAG_PROFILE)
                kprobe_profile_func(kp, regs);
#endif  /* CONFIG_EVENT_PROFILE */
        return 0;       /* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

        if (tp->flags & TP_FLAG_TRACE)
                kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
        if (tp->flags & TP_FLAG_PROFILE)
                kretprobe_profile_func(ri, regs);
#endif  /* CONFIG_EVENT_PROFILE */
        return 0;       /* We don't tweak the kernel, so just return 0 */
}
static int register_probe_event(struct trace_probe *tp)
{
        struct ftrace_event_call *call = &tp->call;
        int ret;

        /* Initialize ftrace_event_call */
        if (probe_is_return(tp)) {
                tp->event.trace = print_kretprobe_event;
                call->raw_init = probe_event_raw_init;
                call->show_format = kretprobe_event_show_format;
                call->define_fields = kretprobe_event_define_fields;
        } else {
                tp->event.trace = print_kprobe_event;
                call->raw_init = probe_event_raw_init;
                call->show_format = kprobe_event_show_format;
                call->define_fields = kprobe_event_define_fields;
        }
        call->event = &tp->event;
        call->id = register_ftrace_event(&tp->event);
        if (!call->id)
                return -ENODEV;
        call->enabled = 1;
        call->regfunc = probe_event_enable;
        call->unregfunc = probe_event_disable;
#ifdef CONFIG_EVENT_PROFILE
        atomic_set(&call->profile_count, -1);
        call->profile_enable = probe_profile_enable;
        call->profile_disable = probe_profile_disable;
#endif
        call->data = tp;
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n", call->name);
                unregister_ftrace_event(&tp->event);
        }
        return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
        /* tp->event is unregistered in trace_remove_event_call() */
        trace_remove_event_call(&tp->call);
}
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);

        /* Event list interface */
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'kprobe_events' entry\n");

        /* Profile interface */
        entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);

        if (!entry)
                pr_warning("Could not create debugfs "
                           "'kprobe_profile' entry\n");
        return 0;
}
fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
                                        int a4, int a5, int a6)
{
        return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
        int ret;
        int (*target)(int, int, int, int, int, int);

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
                                  "a1 a2 a3 a4 a5 a6");
        if (WARN_ON_ONCE(ret))
                pr_warning("error enabling function entry\n");

        ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
                                  "ra rv");
        if (WARN_ON_ONCE(ret))
                pr_warning("error enabling function return\n");

        ret = target(1, 2, 3, 4, 5, 6);

        cleanup_all_probes();

        pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif