trace_kprobe.c

/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"

/*
 * Kprobe event core functions
 */
struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_class	class;
	struct ftrace_event_call	call;
	struct ftrace_event_file	**files;
	ssize_t			size;	/* trace entry size */
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
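
/*
 * Sizing sketch (illustrative arithmetic, not original text): args[] is a
 * flexible array member, so a probe carrying two arguments is allocated as
 * one block of
 *
 *	offsetof(struct trace_probe, args) + 2 * sizeof(struct probe_arg)
 *
 * bytes, e.g.:
 *
 *	tp = kzalloc(SIZEOF_TRACE_PROBE(2), GFP_KERNEL);
 */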

static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}

static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
{
	return tp->rp.kp.offset;
}

static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
{
	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
}

static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
{
	return !!(tp->flags & TP_FLAG_REGISTERED);
}

static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
{
	return !!(kprobe_gone(&tp->rp.kp));
}

static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
						struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_probe_symbol(tp);

	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
{
	return !!strchr(trace_probe_symbol(tp), ':');
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/*
 * Allocate a new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_probe *tp;
	int ret = -ENOMEM;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(ret);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tp->call.class = &tp->class;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tp->class.system = kstrdup(group, GFP_KERNEL);
	if (!tp->class.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(ret);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_free_probe_arg(&tp->args[i]);

	kfree(tp->call.class->system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}

static struct trace_probe *find_trace_probe(const char *event,
					    const char *group)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (strcmp(tp->call.name, event) == 0 &&
		    strcmp(tp->call.class->system, group) == 0)
			return tp;
	return NULL;
}

static int trace_probe_nr_files(struct trace_probe *tp)
{
	struct ftrace_event_file **file = tp->files;
	int ret = 0;

	if (file)
		while (*(file++))
			ret++;

	return ret;
}

static DEFINE_MUTEX(probe_enable_lock);

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int
enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old = tp->files;
		int n = trace_probe_nr_files(tp);

		/* 1 is for the new file and 1 is for the NULL stopper */
		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
			      GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
		new[n] = file;
		/* The last slot keeps the NULL stopper */

		rcu_assign_pointer(tp->files, new);
		tp->flags |= TP_FLAG_TRACE;

		if (old) {
			/* Make sure the probe is done with old files */
			synchronize_sched();
			kfree(old);
		}
	} else
		tp->flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
	    !trace_probe_has_gone(tp)) {
		if (trace_probe_is_return(tp))
			ret = enable_kretprobe(&tp->rp);
		else
			ret = enable_kprobe(&tp->rp.kp);
	}

out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}

static int
trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int i;

	if (tp->files) {
		for (i = 0; tp->files[i]; i++)
			if (tp->files[i] == file)
				return i;
	}

	return -1;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int
disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
{
	int ret = 0;

	mutex_lock(&probe_enable_lock);

	if (file) {
		struct ftrace_event_file **new, **old = tp->files;
		int n = trace_probe_nr_files(tp);
		int i, j;

		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
			ret = -EINVAL;
			goto out_unlock;
		}

		if (n == 1) {	/* Remove the last file */
			tp->flags &= ~TP_FLAG_TRACE;
			new = NULL;
		} else {
			new = kzalloc(n * sizeof(struct ftrace_event_file *),
				      GFP_KERNEL);
			if (!new) {
				ret = -ENOMEM;
				goto out_unlock;
			}

			/* This copy & check loop copies the NULL stopper too */
			for (i = 0, j = 0; j < n && i < n + 1; i++)
				if (old[i] != file)
					new[j++] = old[i];
		}

		rcu_assign_pointer(tp->files, new);

		/* Make sure the probe is done with old files */
		synchronize_sched();
		kfree(old);
	} else
		tp->flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}

out_unlock:
	mutex_unlock(&probe_enable_lock);

	return ret;
}
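
/*
 * A lifetime note (a sketch of the reasoning, not original text): the
 * tp->files array is walked lock-free by the probe handlers, which run
 * with preemption disabled (see kprobe_trace_func() below). Publishing a
 * replacement array with rcu_assign_pointer() and then waiting in
 * synchronize_sched() therefore guarantees that no handler can still be
 * traversing the old array when it is kfree()d.
 */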

/* Internal register function - just handle k*probes and flags */
static int __register_trace_probe(struct trace_probe *tp)
{
	int i, ret;

	if (trace_probe_is_registered(tp))
		return -EINVAL;

	for (i = 0; i < tp->nr_args; i++)
		traceprobe_update_arg(&tp->args[i]);

	/* Set/clear disabled flag according to tp->flags */
	if (trace_probe_is_enabled(tp))
		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret == 0)
		tp->flags |= TP_FLAG_REGISTERED;
	else {
		pr_warning("Could not insert probe at %s+%lu: %d\n",
			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
			pr_warning("This probe might be able to register after "
				   "the target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_probe(struct trace_probe *tp)
{
	if (trace_probe_is_registered(tp)) {
		if (trace_probe_is_return(tp))
			unregister_kretprobe(&tp->rp);
		else
			unregister_kprobe(&tp->rp.kp);
		tp->flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tp->rp.kp.symbol_name)
			tp->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: must be called with probe_lock held */
static int unregister_trace_probe(struct trace_probe *tp)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(tp))
		return -EBUSY;

	__unregister_trace_probe(tp);
	list_del(&tp->list);
	unregister_probe_event(tp);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete an old (same name) event if it exists */
	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
	if (old_tp) {
		ret = unregister_trace_probe(old_tp);
		if (ret < 0)
			goto end;
		free_trace_probe(old_tp);
	}

	/* Register new event */
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_probe(tp);
	if (ret < 0)
		unregister_probe_event(tp);
	else
		list_add_tail(&tp->list, &probe_list);

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Module notifier callback: check events on the coming module */
static int trace_probe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_probe *tp;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tp, &probe_list, list) {
		if (trace_probe_within_module(tp, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_probe(tp);
			ret = __register_trace_probe(tp);
			if (ret)
				pr_warning("Failed to re-register probe %s "
					   "on %s: %d\n",
					   tp->call.name, mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_probe_module_nb = {
	.notifier_call = trace_probe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth entry of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as an alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
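	/*
	 * For illustration (hedged examples in the style of
	 * Documentation/trace/kprobetrace.txt; the event names, symbols
	 * and registers below are hypothetical and architecture-dependent):
	 *
	 *	p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx
	 *	r:myretprobe do_sys_open $retval
	 *	-:myprobe
	 *
	 * The first adds a kprobe event, the second a kretprobe event
	 * fetching the return value, and the third deletes "myprobe".
	 */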
	struct trace_probe *tp;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}
	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}
		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}
		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
						   tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						 is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
				       create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Sum up total data length for dynamic arrays (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
						  ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
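
/*
 * A note on the encoding above (a sketch, assuming the make_data_rloc()
 * helpers in trace_probe.h pack the fetched length into the upper 16 bits
 * of a u32 and an offset into the lower 16 bits): store_trace_args() first
 * records a *relative* offset from the start of the argument area, then
 * convert_rloc_to_loc() rebases it by the entry header size so readers get
 * an offset from the start of the trace entry, roughly:
 *
 *	rloc = make_data_rloc(len, roffs);	// (len << 16) | roffs
 *	loc  = convert_rloc_to_loc(rloc, ent_size + arg_offset);
 */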

/* Kprobe handler */
static __kprobes void
__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
		    struct ftrace_event_file *ftrace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tp->rp.kp.addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_file **file = tp->files;

	/* Note: preempt is already disabled around the kprobe handler */
	while (*file) {
		__kprobe_trace_func(tp, regs, *file);
		file++;
	}
}

/* Kretprobe handler */
static __kprobes void
__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct ftrace_event_file *ftrace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	WARN_ON(call != ftrace_file->event_call);

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_buffer_unlock_commit_regs(buffer, event,
						irq_flags, pc, regs);
}

static __kprobes void
kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct ftrace_event_file **file = tp->files;

	/* Note: preempt is already disabled around the kprobe handler */
	while (*file) {
		__kretprobe_trace_func(tp, ri, regs, *file);
		file++;
	}
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;
	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}
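
/*
 * As an illustration (hypothetical probe; the exact per-argument format
 * strings come from each fetch type's ->fmt), a kprobe event defined as
 * "p:myprobe do_sys_open dfd=%ax" ends up with a print_fmt roughly like
 *
 *	"\"(%lx) dfd=%lx\", REC->__probe_ip, REC->dfd"
 *
 * i.e. the probed address first, then each named argument in order.
 */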

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes void
kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tp->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
			      entry->ip, 1, regs, head, NULL);
}

/* Kretprobe profile handler */
static __kprobes void
kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx,
			      entry->ret_ip, 1, regs, head, NULL);
}
#endif	/* CONFIG_PERF_EVENTS */

static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_probe(tp, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_probe(tp, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(tp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tp, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	tp->nhit++;

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tp, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tp, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_probe_is_return(tp)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	/* Event list interface */
	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");

	return 0;
}
fs_initcall(init_kprobe_trace);
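
/*
 * Illustrative usage of the two debugfs files created above (hedged
 * examples following Documentation/trace/kprobetrace.txt; paths depend on
 * where debugfs is mounted and the probe shown is hypothetical):
 *
 *	echo 'p:myprobe do_sys_open dfd=%ax' \
 *		> /sys/kernel/debug/tracing/kprobe_events
 *	cat /sys/kernel/debug/tracing/kprobe_events
 *	cat /sys/kernel/debug/tracing/kprobe_profile
 *	echo > /sys/kernel/debug/tracing/kprobe_events
 *
 * The final truncating write clears all probes via probes_open()'s
 * O_TRUNC path, i.e. release_all_trace_probes().
 */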

#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static struct ftrace_event_file *
find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tp->call)
			return file;

	return NULL;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;
	struct ftrace_event_file *file;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tp, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_probe(tp, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing them */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		file = find_trace_probe_file(tp, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_probe(tp, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");

	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif