trace_kprobe.c

/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
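
/*
 * Events created without an explicit group name land in this system, i.e.
 * they show up under <debugfs>/tracing/events/kprobes/<event>/ once
 * registered.
 */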

/*
 * Kprobe event core functions
 */
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct ftrace_event_file * __rcu *files;
	ssize_t			size;		/* trace entry size */
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
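
/*
 * args[] is a C99 flexible array member, so the allocation size has to be
 * computed by hand.  For example, SIZEOF_TRACE_PROBE(2) is
 * offsetof(struct trace_probe, args) + 2 * sizeof(struct probe_arg),
 * which alloc_trace_probe() below passes straight to kzalloc().
 */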

static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return !!(kprobe_gone(&tp->rp.kp));
}

static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_probe_symbol(tp);

	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate a new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;

error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

static int trace_probe_nr_files(struct trace_probe *tp)
{
	struct ftrace_event_file **file;
	int ret = 0;

	/*
	 * Since all updates to tp->files are protected by probe_enable_lock,
	 * we don't need to take rcu_read_lock() here.
	 */
	file = rcu_dereference_raw(tp->files);
	if (file)
		while (*(file++))
			ret++;

	return ret;
}
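
/*
 * tp->files is kept NULL-terminated by enable_trace_probe() and
 * disable_trace_probe() below, which is why the count above can simply
 * walk to the terminating NULL.
 */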

static DEFINE_MUTEX(probe_enable_lock);

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old;
		int n = trace_probe_nr_files(tp);

		old = rcu_dereference_raw(tp->files);
		/* 1 is for the new entry and 1 is for the NULL stopper */
		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
			      GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
		new[n] = file;
		/* The last slot keeps a NULL */

		rcu_assign_pointer(tp->files, new);
		tp->flags |= TP_FLAG_TRACE;

		if (old) {
			/* Make sure the probe is done with the old files */
			synchronize_sched();
			kfree(old);
		}
	} else
		tp->flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
	    !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}

out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}
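
/*
 * tp->files is updated with a copy-on-write scheme: readers (the probe
 * handlers) traverse the NULL-terminated array without explicit locking,
 * while writers build a new array, publish it with rcu_assign_pointer(),
 * wait out any in-flight handlers via synchronize_sched() (kprobe handlers
 * run with preemption disabled), and only then free the old array.
 */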

static int
trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
{
	struct ftrace_event_file **files;
	int i;

	/*
	 * Since all updates to tp->files are protected by probe_enable_lock,
	 * we don't need to take rcu_read_lock() here.
	 */
	files = rcu_dereference_raw(tp->files);
	if (files) {
		for (i = 0; files[i]; i++)
			if (files[i] == file)
				return i;
	}

	return -1;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old;
		int n = trace_probe_nr_files(tp);
		int i, j;

		old = rcu_dereference_raw(tp->files);
		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (n == 1) {	/* Remove the last file */
			tp->flags &= ~TP_FLAG_TRACE;
			new = NULL;
		} else {
			new = kzalloc(n * sizeof(struct ftrace_event_file *),
				      GFP_KERNEL);
			if (!new) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			/* This copy & check loop copies the NULL stopper too */
			for (i = 0, j = 0; j < n && i < n + 1; i++)
				if (old[i] != file)
					new[j++] = old[i];
		}

		rcu_assign_pointer(tp->files, new);

		/* Make sure the probe is done with the old files */
		synchronize_sched();
		kfree(old);
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}

out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
{
	int i, ret;

	if (trace_probe_is_registered(tp))
		return -EINVAL;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_update_arg(&tp->args[i]);

	/* Set/clear the disabled flag according to tp->flags */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register "
				   "after the target module is loaded. "
				   "Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Clean up the kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}
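
/*
 * Clearing rp.kp.addr above matters for module probes: with addr NULL and
 * symbol_name still set, a later register_kprobe() re-resolves the symbol,
 * so the probe can be re-armed when its target module is loaded again (see
 * trace_probe_module_callback() below).
 */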

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old (same name) event if it exists */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register the new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register the k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the coming module */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on the coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				pr_warning("Failed to re-register probe %s "
					   "on %s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
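	/*
	 * For example (register names are illustrative and architecture
	 * specific; see Documentation/trace/kprobetrace.txt):
	 *
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' > kprobe_events
	 *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
	 *   echo '-:myprobe' >> kprobe_events
	 */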
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}
		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}
		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
						   tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						 is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
				       create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Sum up total data length for dynamic arrays (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
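
/*
 * Resulting record layout: the fixed-size argument slots come first
 * (tp->size bytes), with variable-length data (strings) appended after
 * them.  For a dynamic argument, the fixed slot holds a packed u32
 * "data_loc" encoding the length and offset of the trailing data - the
 * same convention ftrace uses for __data_loc fields in static events.
 */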

/* Kprobe handler */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	/*
	 * Note: preempt is already disabled around the kprobe handler.
	 * However, we still need an smp_read_barrier_depends() corresponding
	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
	 */
	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);

	if (unlikely(!file))
		return;

	while (*file) {
		__kprobe_trace_func(tp, regs, *file);
		file++;
	}
}

/* Kretprobe handler */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	/*
	 * Note: preempt is already disabled around the kprobe handler.
	 * However, we still need an smp_read_barrier_depends() corresponding
	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
	 */
	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);

	if (unlikely(!file))
		return;

	while (*file) {
		__kretprobe_trace_func(tp, ri, regs, *file);
		file++;
	}
}

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;
	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}
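
/*
 * For instance, a kprobe with a single unsigned-long argument named arg1
 * ends up with a print_fmt resembling:
 *
 *   "(%lx) arg1=%lx", REC->__probe_ip, REC->arg1
 *
 * (FIELD_STRING_IP expands to "__probe_ip"; see trace_probe.h.)
 */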

static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* Round up so the u32 size header plus data is u64-aligned */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
			      entry->ip, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	/* Round up so the u32 size header plus data is u64-aligned */
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
			      entry->ret_ip, 1, regs, head, NULL);
}
#endif	/* CONFIG_PERF_EVENTS */

static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_probe(tp, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_probe_is_return(tp)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	/* Event list interface */
	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");

	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;
	struct ftrace_event_file *file;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing them */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif