trace_kprobe.c

/*
 * kprobe based kernel tracer
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_counter.h>

#include "trace.h"
#include "trace_output.h"

#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64

/* currently, trace_kprobe only supports X86. */

struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};
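
/*
 * A fetch_func pairs a fetch handler with the data it was parsed with;
 * call_fetch() below simply invokes that pair against the probed
 * registers when an event fires.
 */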
static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}

static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy)
{
	return instruction_pointer(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}

/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}

/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};
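
/*
 * fetch_indirect() below implements the "+|-offs(ARG)" syntax: the inner
 * ARG is fetched first, 'offset' is added, and the resulting address is
 * dereferenced. Since 'orig' can itself be a fetch_indirect, references
 * may nest.
 */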
static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}

static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}

/**
 * Kprobe tracer core functions
 */

struct probe_arg {
	struct fetch_func fetch;
	const char *name;
};

struct trace_probe {
	struct list_head list;
	struct kretprobe rp;	/* Use rp.kp for kprobe use */
	unsigned long nhit;
	const char *symbol;	/* symbol name */
	struct ftrace_event_call call;
	struct trace_event event;
	unsigned int nr_args;
	struct probe_arg args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))

static int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_trace_func(struct kretprobe_instance *ri,
				struct pt_regs *regs);
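
/*
 * A return probe is recognized by its kretprobe handler being set:
 * alloc_trace_probe() fills tp->rp.handler only for return probes, so
 * probe_is_return() can test that pointer instead of a separate flag.
 */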
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "a%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "s%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;
		ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "rv");
	else if (ff->func == fetch_ip)
		ret = snprintf(buf, n, "ra");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "sa");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;
		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* Set handler here for checking whether this probe is return or not. */
	if (is_return)
		tp->rp.handler = kretprobe_trace_func;
	else
		tp->rp.kp.pre_handler = kprobe_trace_func;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}

static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_probe_event(const char *event)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (!strcmp(tp->call.name, event))
			return tp;
	return NULL;
}

static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
}

/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static void unregister_trace_probe(struct trace_probe *tp)
{
	unregister_probe_event(tp);
	__unregister_trace_probe(tp);
	list_del(&tp->list);
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		goto end;
	}
	/* register as an event */
	old_tp = find_probe_event(tp->call.name);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		__unregister_trace_probe(tp);
	}
	list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;
	return 0;
}

#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case 'a':	/* argument */
		ret = strict_strtoul(arg + 1, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
		break;
	case 'r':	/* retval or retaddr */
		if (is_return && arg[1] == 'v') {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else if (is_return && arg[1] == 'a') {
			ff->func = fetch_ip;
			ff->data = NULL;
		} else
			ret = -EINVAL;
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case 's':	/* stack */
		if (arg[1] == 'a') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else {
			ret = strict_strtoul(arg + 1, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:EVENT] SYMBOL[+OFFS]|ADDRESS [FETCHARGS]
	 *  - Add kretprobe: r[:EVENT] SYMBOL[+0] [FETCHARGS]
	 * Fetch args:
	 *  aN	: fetch Nth of function argument. (N:0-)
	 *  rv	: fetch return value
	 *  ra	: fetch return address
	 *  sa	: fetch stack address
	 *  sN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 */
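
	/*
	 * For example (illustrative probe/event names), a user could write
	 * the following lines to the 'kprobe_events' debugfs file created
	 * by init_kprobe_trace() below:
	 *   p:myprobe do_sys_open dfd=a0 filename=a1
	 *   r:myretprobe do_sys_open rv
	 */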

	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	if (argc < 2)
		return -EINVAL;

	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else
		return -EINVAL;

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}

	if (isdigit(argv[1][0])) {
		if (is_return)
			return -EINVAL;
		/* an address specified */
		ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);
		if (ret)
			return ret;
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret)
			return ret;
		if (offset && is_return)
			return -EINVAL;
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(event, addr, symbol, offset, argc, is_return);
	if (IS_ERR(tp))
		return PTR_ERR(tp);

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];
		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);

		/* Parse fetch argument */
		if (strlen(arg) > MAX_ARGSTR_LEN) {
			pr_info("Argument%d(%s) is too long.\n", i, arg);
			ret = -ENOSPC;
			goto error;
		}
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret)
			goto error;
	}
	tp->nr_args = i;

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s", tp->call.name);

	if (tp->symbol)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " 0x%p", tp->rp.kp.addr);

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}

static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}

#define WRITE_BUFSIZE 128
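
/*
 * Writes to 'kprobe_events' are handled line by line: each newline-terminated
 * chunk is one probe command, and anything after a '#' on the line is
 * discarded as a comment before the command is parsed.
 */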
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line length is too long: "
				   "Should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ":"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ":"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	if (probe_is_return(tp)) {
		tp->rp.handler = kretprobe_trace_func;
		return enable_kretprobe(&tp->rp);
	} else {
		tp->rp.kp.pre_handler = kprobe_trace_func;
		return enable_kprobe(&tp->rp.kp);
	}
}

static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	if (probe_is_return(tp))
		disable_kretprobe(&tp->rp);
	else
		disable_kprobe(&tp->rp.kp);
}

static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}

#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed,	\
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, ip, "ip", 0);
	DEFINE_FIELD(int, nargs, "nargs", 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, func, "func", 0);
	DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0);
	DEFINE_FIELD(int, nargs, "nargs", 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}

#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				       "offset:%u;\tsize:%u;\n", name,	\
				       (unsigned int)offsetof(typeof(field), item), \
				       (unsigned int)sizeof(type));	\
		if (!ret)						\
			return 0;					\
	} while (0)

static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, "ip");
	SHOW_FIELD(int, nargs, "nargs");

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "%lx:", "ip");
}

static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, "func");
	SHOW_FIELD(unsigned long, ret_ip, "ret_ip");
	SHOW_FIELD(int, nargs, "nargs");

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "%lx <- %lx:",
					 "func, ret_ip");
}

#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	int size, i, pc;
	unsigned long irq_flags;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	do {
		char raw_data[size];
		struct trace_entry *ent;

		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
		entry = (struct kprobe_trace_entry *)raw_data;
		ent = &entry->ent;

		tracing_generic_entry_update(ent, irq_flags, pc);
		ent->type = call->id;
		entry->nargs = tp->nr_args;
		entry->ip = (unsigned long)kp->addr;
		for (i = 0; i < tp->nr_args; i++)
			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
		perf_tpcounter_event(call->id, entry->ip, 1, entry, size);
	} while (0);
	return 0;
}

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	int size, i, pc;
	unsigned long irq_flags;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	do {
		char raw_data[size];
		struct trace_entry *ent;

		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
		entry = (struct kretprobe_trace_entry *)raw_data;
		ent = &entry->ent;

		tracing_generic_entry_update(ent, irq_flags, pc);
		ent->type = call->id;
		entry->nargs = tp->nr_args;
		entry->func = (unsigned long)tp->rp.kp.addr;
		entry->ret_ip = (unsigned long)ri->ret_addr;
		for (i = 0; i < tp->nr_args; i++)
			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
		perf_tpcounter_event(call->id, entry->ret_ip, 1, entry, size);
	} while (0);
	return 0;
}

static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	if (atomic_inc_return(&call->profile_count))
		return 0;

	if (probe_is_return(tp)) {
		tp->rp.handler = kretprobe_profile_func;
		return enable_kretprobe(&tp->rp);
	} else {
		tp->rp.kp.pre_handler = kprobe_profile_func;
		return enable_kprobe(&tp->rp.kp);
	}
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
	if (atomic_add_negative(-1, &call->profile_count))
		probe_event_disable(call);
}

#endif	/* CONFIG_EVENT_PROFILE */
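
/*
 * Each probe is exposed as an ordinary ftrace event under the "kprobes"
 * subsystem, so it can be enabled, filtered and (when CONFIG_EVENT_PROFILE
 * is set) profiled through the usual event interfaces.
 */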
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	call->system = "kprobes";
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 1;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "a1 a2 a3 a4 a5 a6");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "ra rv");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);
#endif