
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace.h"
#include "trace_output.h"

#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"

/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"

const char *reserved_field_names[] = {
	"common_type",
	"common_flags",
	"common_preempt_count",
	"common_pid",
	"common_tgid",
	"common_lock_depth",
	FIELD_STRING_IP,
	FIELD_STRING_NARGS,
	FIELD_STRING_RETIP,
	FIELD_STRING_FUNC,
};
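
/*
 * Note: these names are rejected when a user tries to use one as an
 * argument name; see conflict_field_name() below.
 */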
struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}

/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}

	sc->offset = offset;
	update_symbol_cache(sc);
	return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}

/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}
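
/*
 * A sketch of how fetches compose: the argument string "+4(%ax)" is
 * parsed into an indirect_fetch_data whose orig is fetch_register for
 * %ax and whose offset is 4, so fetch_indirect reads the register,
 * adds 4, and dereferences the resulting address.
 */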
static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}

/**
 * Kprobe event core functions
 */

struct probe_arg {
	struct fetch_func fetch;
	const char *name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE 1
#define TP_FLAG_PROFILE 2

struct trace_probe {
	struct list_head list;
	struct kretprobe rp;	/* Use rp.kp for kprobe use */
	unsigned long nhit;
	unsigned int flags;	/* For TP_FLAG_* */
	const char *symbol;	/* symbol name */
	struct ftrace_event_call call;
	struct trace_event event;
	unsigned int nr_args;
	struct probe_arg args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
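
/*
 * For example, SIZEOF_TRACE_PROBE(2) is
 * offsetof(struct trace_probe, args) + 2 * sizeof(struct probe_arg),
 * i.e. the fixed header plus an in-line array of two probe_args.
 */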
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;
		if (sc->offset)
			ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
					sc->offset);
		else
			ret = snprintf(buf, n, "@%s", sc->symbol);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "$retval");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "$stack");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;
		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}

static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_probe_event(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.system, group) == 0)
			return tp;
	return NULL;
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* register as an event */
	old_tp = find_probe_event(tp->call.name, tp->call.system);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
  342. pr_warning("Faild to register probe event(%d)\n", ret);
		goto end;
	}

	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		unregister_probe_event(tp);
	} else
		list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtoul doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;

	return 0;
}

#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))

static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;

	if (strcmp(arg, "retval") == 0) {
		if (is_return) {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "stack", 5) == 0) {
		if (arg[5] == '\0') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else if (isdigit(arg[5])) {
			ret = strict_strtoul(arg + 5, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		} else
			ret = -EINVAL;
	} else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
		ret = strict_strtoul(arg + 3, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
	} else
		ret = -EINVAL;
	return ret;
}
/* Recursive argument parser */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case '$':
		ret = parse_probe_vars(arg + 1, ff, is_return);
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;
			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = __parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}

/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	if (strlen(arg) > MAX_ARGSTR_LEN) {
		pr_info("Argument is too long: %s\n", arg);
		return -ENOSPC;
	}
	return __parse_probe_arg(arg, ff, is_return);
}

/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
			       struct probe_arg *args, int narg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
		if (strcmp(reserved_field_names[i], name) == 0)
			return 1;
	for (i = 0; i < narg; i++)
		if (strcmp(args[i].name, name) == 0)
			return 1;
	return 0;
}

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $argN	: fetch Nth of function argument. (N:0-)
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 */
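	/*
	 * For example (via the debugfs file implemented below, assuming
	 * debugfs is mounted at /sys/kernel/debug):
	 *   echo 'p:myprobe do_sys_open dfd=$arg0 filename=$arg1' \
	 *		> /sys/kernel/debug/tracing/kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' \
	 *		>> /sys/kernel/debug/tracing/kprobe_events
	 */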
	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else {
		pr_info("Probe definition must be started with 'p' or 'r'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}

	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!group)
		group = KPROBE_EVENT_SYSTEM;
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];

		if (conflict_field_name(argv[i], tp->args, i)) {
			pr_info("Argument%d name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument%d name '%s'.\n",
				i, argv[i]);
			ret = -ENOMEM;
			goto error;
		}

		/* Parse fetch argument */
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret) {
			pr_info("Parse error at argument%d. (%d)\n", i, ret);
			kfree(tp->args[i].name);
			goto error;
		}
		tp->nr_args++;
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " %s", probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return ret;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}
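
/*
 * Note: opening kprobe_events for writing with O_TRUNC (e.g. a shell
 * "echo ... > kprobe_events") clears all existing probes first, while
 * appending ("echo ... >> kprobe_events") keeps them.
 */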
static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}
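
/*
 * probes_write() below consumes its input line by line: each line is
 * passed to command_trace_probe() as one probe definition, '#' starts
 * a comment, and a single line must fit within WRITE_BUFSIZE.
 */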
#define WRITE_BUFSIZE 128

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');
		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line is too long: "
				   "should be shorter than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');
		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
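
/*
 * Sample "kprobe_profile" output, per the format string above
 * (event name, hit count, then missed count), e.g.:
 *  myprobe                                                   50               0
 */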
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_TRACE;
	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_TRACE;
	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}

static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}

#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed, \
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}

static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}

#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				"offset:%u;\tsize:%u;\n", name,		\
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type));		\
		if (!ret)						\
			return 0;					\
	} while (0)

static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)",
					 "REC->" FIELD_STRING_IP);
}

static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
	SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
	SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->" FIELD_STRING_FUNC
					 ", REC->" FIELD_STRING_RETIP);
}

#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
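	/*
	 * The sizing above rounds the record up so that, together with
	 * the u32 size header perf prepends to raw samples, it stays
	 * u64-aligned (the same idiom other event-profile handlers use);
	 * the dead bytes are zeroed below before the buffer is handed over.
	 */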
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non-NMI buffer; this also protects the RCU read side.
	 */
	local_irq_save(irq_flags);
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *trace_buf;
	char *raw_data;
	int rctx;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non-NMI buffer; this also protects the RCU read side.
	 */
	local_irq_save(irq_flags);
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	__cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, __cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(irq_flags);

	return 0;
}

static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_PROFILE;

	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_PROFILE;

	if (!(tp->flags & TP_FLAG_TRACE)) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif