trace_kprobe.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422
  1. /*
  2. * Kprobes-based tracing events
  3. *
  4. * Created by Masami Hiramatsu <mhiramat@redhat.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/module.h>
  20. #include <linux/uaccess.h>
  21. #include "trace_probe.h"
  22. #define KPROBE_EVENT_SYSTEM "kprobes"
/*
 * Kprobe event core functions
 */
/*
 * Per-event state for one kprobe-based trace event.  Covers both entry
 * (kprobe) and return (kretprobe) probes; for the plain-kprobe case only
 * the embedded rp.kp is used.
 */
struct trace_probe {
	struct list_head	list;		/* link in probe_list */
	struct kretprobe	rp;		/* Use rp.kp for kprobe use */
	unsigned long		nhit;		/* number of times the probe hit */
	unsigned int		flags;		/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct list_head	files;		/* event_file_link list (ftrace users) */
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;	/* number of used entries in args[] */
	struct probe_arg	args[];		/* flexible array; see SIZEOF_TRACE_PROBE */
};
/* One ftrace event file subscribed to a trace_probe (RCU-listed in tp->files) */
struct event_file_link {
	struct ftrace_event_file	*file;
	struct list_head		list;
};
/* Allocation size of a trace_probe with n probe_arg slots in its
 * flexible args[] array. */
#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
  46. static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
  47. {
  48. return tp->rp.handler != NULL;
  49. }
  50. static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
  51. {
  52. return tp->symbol ? tp->symbol : "unknown";
  53. }
  54. static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
  55. {
  56. return tp->rp.kp.offset;
  57. }
  58. static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
  59. {
  60. return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
  61. }
  62. static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
  63. {
  64. return !!(tp->flags & TP_FLAG_REGISTERED);
  65. }
  66. static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
  67. {
  68. return !!(kprobe_gone(&tp->rp.kp));
  69. }
  70. static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
  71. struct module *mod)
  72. {
  73. int len = strlen(mod->name);
  74. const char *name = trace_probe_symbol(tp);
  75. return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  76. }
  77. static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
  78. {
  79. return !!strchr(trace_probe_symbol(tp), ':');
  80. }
  81. static int register_probe_event(struct trace_probe *tp);
  82. static void unregister_probe_event(struct trace_probe *tp);
  83. static DEFINE_MUTEX(probe_lock);
  84. static LIST_HEAD(probe_list);
  85. static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
  86. static int kretprobe_dispatcher(struct kretprobe_instance *ri,
  87. struct pt_regs *regs);
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 *
 * The probed location is given either as @symbol (+@offs) or, when
 * @symbol is NULL, as a raw @addr.  @nargs only reserves space in the
 * flexible args[] array; the caller fills the entries afterwards.
 * Returns the new probe or an ERR_PTR() on failure.
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		/* Keep a private copy; rp.kp.symbol_name must stay valid */
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	/* Installing rp.handler is what makes this a return probe
	 * (see trace_probe_is_return()). */
	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	INIT_LIST_HEAD(&tp->files);
	return tp;
error:
	/* Members not yet allocated are NULL (kzalloc); kfree(NULL) is safe */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}
/* Release a trace_probe and everything it owns (args, names, itself). */
static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	/* Free each parsed fetch argument first */
	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);	/* call.class points at tp->class */
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}
  149. static struct trace_probe *find_trace_probe(const char *event,
  150. const char *group)
  151. {
  152. struct trace_probe *tp;
  153. list_for_each_entry(tp, &probe_list, list)
  154. if (strcmp(tp->call.name, event) == 0 &&
  155. strcmp(tp->call.class->system, group) == 0)
  156. return tp;
  157. return NULL;
  158. }
  159. /*
  160. * Enable trace_probe
  161. * if the file is NULL, enable "perf" handler, or enable "trace" handler.
  162. */
  163. static int
  164. enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
  165. {
  166. int ret = 0;
  167. if (file) {
  168. struct event_file_link *link;
  169. link = kmalloc(sizeof(*link), GFP_KERNEL);
  170. if (!link) {
  171. ret = -ENOMEM;
  172. goto out;
  173. }
  174. link->file = file;
  175. list_add_tail_rcu(&link->list, &tp->files);
  176. tp->flags |= TP_FLAG_TRACE;
  177. } else
  178. tp->flags |= TP_FLAG_PROFILE;
  179. if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) {
  180. if (trace_probe_is_return(tp))
  181. ret = enable_kretprobe(&tp->rp);
  182. else
  183. ret = enable_kprobe(&tp->rp.kp);
  184. }
  185. out:
  186. return ret;
  187. }
  188. static struct event_file_link *
  189. find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
  190. {
  191. struct event_file_link *link;
  192. list_for_each_entry(link, &tp->files, list)
  193. if (link->file == file)
  194. return link;
  195. return NULL;
  196. }
/*
 * Disable trace_probe
 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		/* synchronize with kprobe_trace_func/kretprobe_trace_func */
		synchronize_sched();
		kfree(link);

		/* Other files still use this event: keep TP_FLAG_TRACE set */
		if (!list_empty(&tp->files))
			goto out;

		tp->flags &= ~TP_FLAG_TRACE;
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	/* Disarm the k*probe only once no user (trace or perf) is left */
	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
 out:
	return ret;
}
  230. /* Internal register function - just handle k*probes and flags */
  231. static int __register_trace_probe(struct trace_probe *tp)
  232. {
  233. int i, ret;
  234. if (trace_probe_is_registered(tp))
  235. return -EINVAL;
  236. for (i = 0; i < tp->nr_args; i++)
  237. traceprobe_update_arg(&tp->args[i]);
  238. /* Set/clear disabled flag according to tp->flag */
  239. if (trace_probe_is_enabled(tp))
  240. tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
  241. else
  242. tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
  243. if (trace_probe_is_return(tp))
  244. ret = register_kretprobe(&tp->rp);
  245. else
  246. ret = register_kprobe(&tp->rp.kp);
  247. if (ret == 0)
  248. tp->flags |= TP_FLAG_REGISTERED;
  249. else {
  250. pr_warning("Could not insert probe at %s+%lu: %d\n",
  251. trace_probe_symbol(tp), trace_probe_offset(tp), ret);
  252. if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
  253. pr_warning("This probe might be able to register after"
  254. "target module is loaded. Continue.\n");
  255. ret = 0;
  256. } else if (ret == -EILSEQ) {
  257. pr_warning("Probing address(0x%p) is not an "
  258. "instruction boundary.\n",
  259. tp->rp.kp.addr);
  260. ret = -EINVAL;
  261. }
  262. }
  263. return ret;
  264. }
/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse: drop the resolved address so a
		 * later re-register resolves symbol_name again (see the
		 * module notifier path). */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}
/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if exist */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;	/* old event is still in use (-EBUSY) */
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe; undo the event registration if that fails */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}
  320. /* Module notifier call back, checking event on the module */
  321. static int trace_probe_module_callback(struct notifier_block *nb,
  322. unsigned long val, void *data)
  323. {
  324. struct module *mod = data;
  325. struct trace_probe *tp;
  326. int ret;
  327. if (val != MODULE_STATE_COMING)
  328. return NOTIFY_DONE;
  329. /* Update probes on coming module */
  330. mutex_lock(&probe_lock);
  331. list_for_each_entry(tp, &probe_list, list) {
  332. if (trace_probe_within_module(tp, mod)) {
  333. /* Don't need to check busy - this should have gone. */
  334. __unregister_trace_probe(tp);
  335. ret = __register_trace_probe(tp);
  336. if (ret)
  337. pr_warning("Failed to re-register probe %s on"
  338. "%s: %d\n",
  339. tp->call.name, mod->name, ret);
  340. }
  341. }
  342. mutex_unlock(&probe_lock);
  343. return NOTIFY_DONE;
  344. }
/* Registered with the module notifier chain to catch MODULE_STATE_COMING */
static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};
/*
 * Parse one kprobe_events command line and create (or delete) the probe.
 * NOTE: argv[] strings are modified in place while splitting names.
 */
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	/* Optional ":GRP/EVENT" suffix, split in place */
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';	/* terminate the group name */
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	/* Remaining argv entries are the fetch arguments */
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name, e.g. "p_vfs_read_0" */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name ("NAME=FETCHARG") */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
						   tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						 is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}
/* Remove every probe; used when kprobe_events is opened with O_TRUNC. */
static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);
	return ret;
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	/* Hold probe_lock across the whole traversal; released in ->stop */
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}
/* Advance to the next probe in probe_list (probe_lock held by ->start) */
static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}
/* Drop the lock taken in probes_seq_start() */
static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}
/* Print one probe definition, in the same syntax create_trace_probe() accepts */
static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	/* Location: raw address, symbol+offset, or plain symbol */
	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}
/* seq_file operations for listing probes via the kprobe_events file */
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};
/*
 * Open kprobe_events.  Opening for write with O_TRUNC deletes all
 * existing (unused) probes before the file is used.
 */
static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;	/* some probe is still enabled */
	}

	return seq_open(file, &probes_seq_op);
}
/* Feed written command lines to create_trace_probe(), one per line */
static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
				       create_trace_probe);
}
/* File operations for the kprobe_events control file */
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};
/* Probes profiling interfaces */
/* One line per probe: name, hit count, kprobe missed count */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
/* Reuses the probes listing iterators; only ->show differs */
static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};
/* Open the kprobe_profile statistics file */
static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
/* File operations for the read-only kprobe_profile file */
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
/* Sum up total data length for dynamic arraies (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	/* Only args with a fetch_size.fn carry variable-length data */
	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}
/*
 * Store the value of each argument.
 *
 * Fixed-size args are written at their args[i].offset within @data.
 * Variable-length args (strings) store a u32 "data location" word at
 * their offset, and the actual bytes are appended after the fixed part
 * (tracked by @end); @maxlen is the remaining dynamic space (@dsize).
 */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
						  ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
/* Kprobe handler */
/* Record one kprobe hit into @ftrace_file's ring buffer. */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled event files record nothing */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Entry head + fixed arg area + dynamic (string) data */
	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}
/*
 * Fan one kprobe hit out to every subscribed event file.  RCU list walk;
 * paired with list_del_rcu()+synchronize_sched() in disable_trace_probe().
 */
static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tp->files, list)
		__kprobe_trace_func(tp, regs, link->file);
}
/* Kretprobe handler */
/* Record one function-return hit (entry addr + return address). */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	/* Soft-disabled event files record nothing */
	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	/* Entry head + fixed arg area + dynamic (string) data */
	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}
/*
 * Fan one kretprobe hit out to every subscribed event file.  RCU list
 * walk; paired with list_del_rcu()+synchronize_sched() in the disable path.
 */
static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tp->files, list)
		__kretprobe_trace_func(tp, ri, regs, link->file);
}
/* Event entry printers */
/* Format a kprobe entry as "EVENT: (ip+off) arg1=... arg2=..." */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument values are stored right after the fixed entry head */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	/* output did not fit in the trace_seq buffer */
	return TRACE_TYPE_PARTIAL_LINE;
}
/* Format a kretprobe entry as "EVENT: (ret_ip <- func) arg1=..." */
static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	/* Probed function itself is printed without the +offset suffix */
	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	/* Argument values are stored right after the fixed entry head */
	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	/* output did not fit in the trace_seq buffer */
	return TRACE_TYPE_PARTIAL_LINE;
}
/*
 * Describe the fields of a kprobe entry event to the ftrace core:
 * the probed instruction pointer plus one field per fetched argument.
 * Returns 0 on success or the error from trace_define_field().
 */
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	/*
	 * DEFINE_FIELD (defined earlier in this file) registers the fixed
	 * header field; presumably it uses the local 'ret' and returns on
	 * error — see its definition.
	 */
	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		/* argument payload starts right after the fixed header */
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * Describe the fields of a kretprobe (function return) event: the probed
 * function address, the return site, and one field per fetched argument.
 * Returns 0 on success or the error from trace_define_field().
 */
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	/*
	 * DEFINE_FIELD (defined earlier in this file) registers a fixed
	 * header field; presumably it uses the local 'ret' and returns on
	 * error — see its definition.
	 */
	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		/* argument payload starts right after the fixed header */
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}
  852. static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
  853. {
  854. int i;
  855. int pos = 0;
  856. const char *fmt, *arg;
  857. if (!trace_probe_is_return(tp)) {
  858. fmt = "(%lx)";
  859. arg = "REC->" FIELD_STRING_IP;
  860. } else {
  861. fmt = "(%lx <- %lx)";
  862. arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
  863. }
  864. /* When len=0, we just calculate the needed length */
  865. #define LEN_OR_ZERO (len ? len - pos : 0)
  866. pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
  867. for (i = 0; i < tp->nr_args; i++) {
  868. pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
  869. tp->args[i].name, tp->args[i].type->fmt);
  870. }
  871. pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
  872. for (i = 0; i < tp->nr_args; i++) {
  873. if (strcmp(tp->args[i].type->name, "string") == 0)
  874. pos += snprintf(buf + pos, LEN_OR_ZERO,
  875. ", __get_str(%s)",
  876. tp->args[i].name);
  877. else
  878. pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
  879. tp->args[i].name);
  880. }
  881. #undef LEN_OR_ZERO
  882. /* return the length of print_fmt */
  883. return pos;
  884. }
  885. static int set_print_fmt(struct trace_probe *tp)
  886. {
  887. int len;
  888. char *print_fmt;
  889. /* First: called with 0 length to calculate the needed length */
  890. len = __set_print_fmt(tp, NULL, 0);
  891. print_fmt = kmalloc(len + 1, GFP_KERNEL);
  892. if (!print_fmt)
  893. return -ENOMEM;
  894. /* Second: actually write the @print_fmt */
  895. __set_print_fmt(tp, print_fmt, len + 1);
  896. tp->call.print_fmt = print_fmt;
  897. return 0;
  898. }
  899. #ifdef CONFIG_PERF_EVENTS
  900. /* Kprobe profile handler */
  901. static __kprobes void
  902. kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
  903. {
  904. struct ftrace_event_call *call = &tp->call;
  905. struct kprobe_trace_entry_head *entry;
  906. struct hlist_head *head;
  907. int size, __size, dsize;
  908. int rctx;
  909. head = this_cpu_ptr(call->perf_events);
  910. if (hlist_empty(head))
  911. return;
  912. dsize = __get_data_size(tp, regs);
  913. __size = sizeof(*entry) + tp->size + dsize;
  914. size = ALIGN(__size + sizeof(u32), sizeof(u64));
  915. size -= sizeof(u32);
  916. if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
  917. "profile buffer not large enough"))
  918. return;
  919. entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
  920. if (!entry)
  921. return;
  922. entry->ip = (unsigned long)tp->rp.kp.addr;
  923. memset(&entry[1], 0, dsize);
  924. store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
  925. perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
  926. }
  927. /* Kretprobe profile handler */
  928. static __kprobes void
  929. kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
  930. struct pt_regs *regs)
  931. {
  932. struct ftrace_event_call *call = &tp->call;
  933. struct kretprobe_trace_entry_head *entry;
  934. struct hlist_head *head;
  935. int size, __size, dsize;
  936. int rctx;
  937. head = this_cpu_ptr(call->perf_events);
  938. if (hlist_empty(head))
  939. return;
  940. dsize = __get_data_size(tp, regs);
  941. __size = sizeof(*entry) + tp->size + dsize;
  942. size = ALIGN(__size + sizeof(u32), sizeof(u64));
  943. size -= sizeof(u32);
  944. if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
  945. "profile buffer not large enough"))
  946. return;
  947. entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
  948. if (!entry)
  949. return;
  950. entry->func = (unsigned long)tp->rp.kp.addr;
  951. entry->ret_ip = (unsigned long)ri->ret_addr;
  952. store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
  953. perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
  954. }
  955. #endif /* CONFIG_PERF_EVENTS */
  956. /*
  957. * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
  958. *
  959. * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
  960. * lockless, but we can't race with this __init function.
  961. */
/*
 * Registration callback installed as call->class->reg. Enables or
 * disables the probe for ftrace (with the requesting event file) or
 * for perf (file is NULL on the perf paths). Unknown/unused perf
 * sub-operations are accepted as no-ops.
 */
static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;
	/* for TRACE_REG_(UN)REGISTER, @data is the requesting event file */
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_probe(tp, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		/* NULL file marks the perf (profile) path */
		return enable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
/*
 * Common kprobe pre-handler: count the hit and forward to the ftrace
 * and/or perf handlers, depending on which consumers enabled the probe.
 */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;	/* bump the probe hit counter */

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);	/* ftrace consumer */
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);	/* perf consumer */
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
/*
 * Common kretprobe handler: count the hit and forward to the ftrace
 * and/or perf handlers, depending on which consumers enabled the probe.
 */
static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;	/* bump the probe hit counter */

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);	/* ftrace consumer */
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);	/* perf consumer */
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
/* Output callbacks used when a kretprobe event is printed to the trace */
static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

/* Output callbacks used when a kprobe event is printed to the trace */
static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};
  1019. static int register_probe_event(struct trace_probe *tp)
  1020. {
  1021. struct ftrace_event_call *call = &tp->call;
  1022. int ret;
  1023. /* Initialize ftrace_event_call */
  1024. INIT_LIST_HEAD(&call->class->fields);
  1025. if (trace_probe_is_return(tp)) {
  1026. call->event.funcs = &kretprobe_funcs;
  1027. call->class->define_fields = kretprobe_event_define_fields;
  1028. } else {
  1029. call->event.funcs = &kprobe_funcs;
  1030. call->class->define_fields = kprobe_event_define_fields;
  1031. }
  1032. if (set_print_fmt(tp) < 0)
  1033. return -ENOMEM;
  1034. ret = register_ftrace_event(&call->event);
  1035. if (!ret) {
  1036. kfree(call->print_fmt);
  1037. return -ENODEV;
  1038. }
  1039. call->flags = 0;
  1040. call->class->reg = kprobe_register;
  1041. call->data = tp;
  1042. ret = trace_add_event_call(call);
  1043. if (ret) {
  1044. pr_info("Failed to register kprobe event: %s\n", call->name);
  1045. kfree(call->print_fmt);
  1046. unregister_ftrace_event(&call->event);
  1047. }
  1048. return ret;
  1049. }
/*
 * Counterpart of register_probe_event(): remove the event from the
 * ftrace core and free the print format built by set_print_fmt().
 */
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}
  1056. /* Make a debugfs interface for controlling probe points */
  1057. static __init int init_kprobe_trace(void)
  1058. {
  1059. struct dentry *d_tracer;
  1060. struct dentry *entry;
  1061. if (register_module_notifier(&trace_probe_module_nb))
  1062. return -EINVAL;
  1063. d_tracer = tracing_init_dentry();
  1064. if (!d_tracer)
  1065. return 0;
  1066. entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
  1067. NULL, &kprobe_events_ops);
  1068. /* Event list interface */
  1069. if (!entry)
  1070. pr_warning("Could not create debugfs "
  1071. "'kprobe_events' entry\n");
  1072. /* Profile interface */
  1073. entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
  1074. NULL, &kprobe_profile_ops);
  1075. if (!entry)
  1076. pr_warning("Could not create debugfs "
  1077. "'kprobe_profile' entry\n");
  1078. return 0;
  1079. }
  1080. fs_initcall(init_kprobe_trace);
  1081. #ifdef CONFIG_FTRACE_STARTUP_TEST
  1082. /*
  1083. * The "__used" keeps gcc from removing the function symbol
  1084. * from the kallsyms table.
  1085. */
  1086. static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
  1087. int a4, int a5, int a6)
  1088. {
  1089. return a1 + a2 + a3 + a4 + a5 + a6;
  1090. }
/*
 * Find the ftrace_event_file in trace array @tr that wraps @tp's event
 * call. Returns NULL when the event is not present in @tr.
 */
static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}
  1100. /*
  1101. * Nobody but us can call enable_trace_probe/disable_trace_probe at this
  1102. * stage, we can do this lockless.
  1103. */
/*
 * Boot-time self test: create an entry probe and a return probe on
 * kprobe_trace_selftest_target(), enable both, call the target, then
 * disable and delete both probes. Each failure bumps 'warn'; the final
 * verdict is printed as "OK" or "NG". Always returns 0 (initcall).
 *
 * NOTE: each WARN_ON_ONCE below warns once per call site, so the
 * repeated lookup/enable pattern is deliberately kept inline rather
 * than factored into a helper.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;
	struct ftrace_event_file *file;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	/* Create an entry probe fetching stack values */
	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	/* Create a return probe fetching the return value */
	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	if (warn)
		goto end;

	/* Fire both probes; the return value itself is not checked */
	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing it */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	/* Delete both test probes */
	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");

	return 0;
}
  1199. late_initcall(kprobe_trace_self_tests_init);
  1200. #endif