trace_kprobe.c

/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace.h"
#include "trace_output.h"

#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"

/* Reserved field names */
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_NARGS "__probe_nargs"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"

const char *reserved_field_names[] = {
        "common_type",
        "common_flags",
        "common_preempt_count",
        "common_pid",
        "common_tgid",
        "common_lock_depth",
        FIELD_STRING_IP,
        FIELD_STRING_NARGS,
        FIELD_STRING_RETIP,
        FIELD_STRING_FUNC,
};

struct fetch_func {
        unsigned long (*func)(struct pt_regs *, void *);
        void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
                                          struct pt_regs *regs)
{
        return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
                                              void *offset)
{
        return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
                                           void *num)
{
        return regs_get_kernel_stack_nth(regs,
                                         (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
        unsigned long retval;

        if (probe_kernel_address(addr, retval))
                return 0;
        return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
        return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
                                              void *dummy)
{
        return regs_return_value(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
                                                   void *dummy)
{
        return kernel_stack_pointer(regs);
}

/* Memory fetching by symbol */
struct symbol_cache {
        char *symbol;
        long offset;
        unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
        sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
        if (sc->addr)
                sc->addr += sc->offset;
        return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
        kfree(sc->symbol);
        kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
        struct symbol_cache *sc;

        if (!sym || strlen(sym) == 0)
                return NULL;
        sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->symbol = kstrdup(sym, GFP_KERNEL);
        if (!sc->symbol) {
                kfree(sc);
                return NULL;
        }

        sc->offset = offset;
        update_symbol_cache(sc);
        return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
        struct symbol_cache *sc = data;

        if (sc->addr)
                return fetch_memory(regs, (void *)sc->addr);
        else
                return 0;
}

/* Special indirect memory access interface */
struct indirect_fetch_data {
        struct fetch_func orig;
        long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
        struct indirect_fetch_data *ind = data;
        unsigned long addr;

        addr = call_fetch(&ind->orig, regs);
        if (addr) {
                addr += ind->offset;
                return fetch_memory(regs, (void *)addr);
        } else
                return 0;
}

static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
        if (data->orig.func == fetch_indirect)
                free_indirect_fetch_data(data->orig.data);
        else if (data->orig.func == fetch_symbol)
                free_symbol_cache(data->orig.data);
        kfree(data);
}
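
/*
 * For example, an argument string like "+4(+8(%ax))" is parsed into two
 * chained indirect_fetch_data nodes: the outer node (offset 4) wraps an
 * inner node (offset 8) whose orig is a fetch_register for %ax; the
 * register name is illustrative.  free_indirect_fetch_data() unwinds
 * exactly such a chain recursively.
 */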

/*
 * Kprobe event core functions
 */

struct probe_arg {
        struct fetch_func fetch;
        const char *name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE 1
#define TP_FLAG_PROFILE 2

struct trace_probe {
        struct list_head list;
        struct kretprobe rp;    /* Use rp.kp for kprobe use */
        unsigned long nhit;
        unsigned int flags;     /* For TP_FLAG_* */
        const char *symbol;     /* symbol name */
        struct ftrace_event_call call;
        struct trace_event event;
        unsigned int nr_args;
        struct probe_arg args[];
};

#define SIZEOF_TRACE_PROBE(n)                   \
        (offsetof(struct trace_probe, args) +   \
        (sizeof(struct probe_arg) * (n)))
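
/*
 * args[] is a flexible array member, so a trace_probe carrying n arguments
 * occupies offsetof(struct trace_probe, args) + n * sizeof(struct probe_arg)
 * bytes; SIZEOF_TRACE_PROBE(n) computes exactly that for the kzalloc() in
 * alloc_trace_probe() below.
 */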

static __kprobes int probe_is_return(struct trace_probe *tp)
{
        return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
        return tp->symbol ? tp->symbol : "unknown";
}
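
/*
 * Decode a fetch_func back into its textual form, e.g. "$arg0", "%ax",
 * "@0x...", "@symbol+8", "$retval", "$stack", "$stack3" or a nested
 * "+4(%ax)" (register names illustrative).  This is the inverse of
 * parse_probe_arg() and is used by the probes listing interface below.
 */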
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
        int ret = -EINVAL;

        if (ff->func == fetch_argument)
                ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
        else if (ff->func == fetch_register) {
                const char *name;
                name = regs_query_register_name((unsigned int)((long)ff->data));
                ret = snprintf(buf, n, "%%%s", name);
        } else if (ff->func == fetch_stack)
                ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
        else if (ff->func == fetch_memory)
                ret = snprintf(buf, n, "@0x%p", ff->data);
        else if (ff->func == fetch_symbol) {
                struct symbol_cache *sc = ff->data;
                if (sc->offset)
                        ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
                                       sc->offset);
                else
                        ret = snprintf(buf, n, "@%s", sc->symbol);
        } else if (ff->func == fetch_retvalue)
                ret = snprintf(buf, n, "$retval");
        else if (ff->func == fetch_stack_address)
                ret = snprintf(buf, n, "$stack");
        else if (ff->func == fetch_indirect) {
                struct indirect_fetch_data *id = ff->data;
                size_t l = 0;
                ret = snprintf(buf, n, "%+ld(", id->offset);
                if (ret >= n)
                        goto end;
                l += ret;
                ret = probe_arg_string(buf + l, n - l, &id->orig);
                if (ret < 0)
                        goto end;
                l += ret;
                ret = snprintf(buf + l, n - l, ")");
                ret += l;
        }
end:
        if (ret >= n)
                return -ENOSPC;
        return ret;
}

static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);
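
/*
 * Event and group names become directory names under tracing/events/, so
 * they must look like C identifiers: e.g. "myprobe_1" is accepted by the
 * check below, while "1probe" (leading digit) and "my-probe" ('-' is not
 * alphanumeric or '_') are rejected.
 */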
/* Check that the name is suitable for an event/group */
static int check_event_name(const char *name)
{
        if (!isalpha(*name) && *name != '_')
                return 0;
        while (*++name != '\0') {
                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
                        return 0;
        }
        return 1;
}

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int nargs, int is_return)
{
        struct trace_probe *tp;
        int ret = -ENOMEM;

        tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(ret);

        if (symbol) {
                tp->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tp->symbol)
                        goto error;
                tp->rp.kp.symbol_name = tp->symbol;
                tp->rp.kp.offset = offs;
        } else
                tp->rp.kp.addr = addr;

        if (is_return)
                tp->rp.handler = kretprobe_dispatcher;
        else
                tp->rp.kp.pre_handler = kprobe_dispatcher;

        if (!event || !check_event_name(event)) {
                ret = -EINVAL;
                goto error;
        }

        tp->call.name = kstrdup(event, GFP_KERNEL);
        if (!tp->call.name)
                goto error;

        if (!group || !check_event_name(group)) {
                ret = -EINVAL;
                goto error;
        }

        tp->call.system = kstrdup(group, GFP_KERNEL);
        if (!tp->call.system)
                goto error;

        INIT_LIST_HEAD(&tp->list);
        return tp;
error:
        kfree(tp->call.name);
        kfree(tp->symbol);
        kfree(tp);
        return ERR_PTR(ret);
}

static void free_probe_arg(struct probe_arg *arg)
{
        if (arg->fetch.func == fetch_symbol)
                free_symbol_cache(arg->fetch.data);
        else if (arg->fetch.func == fetch_indirect)
                free_indirect_fetch_data(arg->fetch.data);
        kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
        int i;

        for (i = 0; i < tp->nr_args; i++)
                free_probe_arg(&tp->args[i]);

        kfree(tp->call.system);
        kfree(tp->call.name);
        kfree(tp->symbol);
        kfree(tp);
}

static struct trace_probe *find_probe_event(const char *event,
                                            const char *group)
{
        struct trace_probe *tp;

        list_for_each_entry(tp, &probe_list, list)
                if (strcmp(tp->call.name, event) == 0 &&
                    strcmp(tp->call.system, group) == 0)
                        return tp;
        return NULL;
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static void unregister_trace_probe(struct trace_probe *tp)
{
        if (probe_is_return(tp))
                unregister_kretprobe(&tp->rp);
        else
                unregister_kprobe(&tp->rp.kp);
        list_del(&tp->list);
        unregister_probe_event(tp);
}

/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
        struct trace_probe *old_tp;
        int ret;

        mutex_lock(&probe_lock);

        /* register as an event */
        old_tp = find_probe_event(tp->call.name, tp->call.system);
        if (old_tp) {
                /* delete old event */
                unregister_trace_probe(old_tp);
                free_trace_probe(old_tp);
        }
        ret = register_probe_event(tp);
        if (ret) {
                pr_warning("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
        if (probe_is_return(tp))
                ret = register_kretprobe(&tp->rp);
        else
                ret = register_kprobe(&tp->rp.kp);

        if (ret) {
                pr_warning("Could not insert probe(%d)\n", ret);
                if (ret == -EILSEQ) {
                        pr_warning("Probing address(0x%p) is not an "
                                   "instruction boundary.\n",
                                   tp->rp.kp.addr);
                        ret = -EINVAL;
                }
                unregister_probe_event(tp);
        } else
                list_add_tail(&tp->list, &probe_list);
end:
        mutex_unlock(&probe_lock);
        return ret;
}

/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
        char *tmp;
        int ret;

        if (!offset)
                return -EINVAL;

        tmp = strchr(symbol, '+');
        if (tmp) {
                /* skip the '+' sign because strict_strtoul doesn't accept it */
                ret = strict_strtoul(tmp + 1, 0, offset);
                if (ret)
                        return ret;
                *tmp = '\0';
        } else
                *offset = 0;

        return 0;
}
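
/* E.g. "schedule+16" is split into symbol "schedule" and offset 16. */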

#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
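
/*
 * Parse the "$..." probe variables: "$retval" (return probes only),
 * "$stack" (stack address), "$stackN" (Nth stack entry) and "$argN"
 * (Nth function argument); N is bounded by PARAM_MAX_STACK and
 * PARAM_MAX_ARGS respectively.
 */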
static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
{
        int ret = 0;
        unsigned long param;

        if (strcmp(arg, "retval") == 0) {
                if (is_return) {
                        ff->func = fetch_retvalue;
                        ff->data = NULL;
                } else
                        ret = -EINVAL;
        } else if (strncmp(arg, "stack", 5) == 0) {
                if (arg[5] == '\0') {
                        ff->func = fetch_stack_address;
                        ff->data = NULL;
                } else if (isdigit(arg[5])) {
                        ret = strict_strtoul(arg + 5, 10, &param);
                        if (ret || param > PARAM_MAX_STACK)
                                ret = -EINVAL;
                        else {
                                ff->func = fetch_stack;
                                ff->data = (void *)param;
                        }
                } else
                        ret = -EINVAL;
        } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
                ret = strict_strtoul(arg + 3, 10, &param);
                if (ret || param > PARAM_MAX_ARGS)
                        ret = -EINVAL;
                else {
                        ff->func = fetch_argument;
                        ff->data = (void *)param;
                }
        } else
                ret = -EINVAL;
        return ret;
}

/* Recursive argument parser */
static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
        int ret = 0;
        unsigned long param;
        long offset;
        char *tmp;

        switch (arg[0]) {
        case '$':
                ret = parse_probe_vars(arg + 1, ff, is_return);
                break;
        case '%':       /* named register */
                ret = regs_query_register_offset(arg + 1);
                if (ret >= 0) {
                        ff->func = fetch_register;
                        ff->data = (void *)(unsigned long)ret;
                        ret = 0;
                }
                break;
        case '@':       /* memory or symbol */
                if (isdigit(arg[1])) {
                        ret = strict_strtoul(arg + 1, 0, &param);
                        if (ret)
                                break;
                        ff->func = fetch_memory;
                        ff->data = (void *)param;
                } else {
                        ret = split_symbol_offset(arg + 1, &offset);
                        if (ret)
                                break;
                        ff->data = alloc_symbol_cache(arg + 1, offset);
                        if (ff->data)
                                ff->func = fetch_symbol;
                        else
                                ret = -EINVAL;
                }
                break;
        case '+':       /* indirect memory */
        case '-':
                tmp = strchr(arg, '(');
                if (!tmp) {
                        ret = -EINVAL;
                        break;
                }
                *tmp = '\0';
                ret = strict_strtol(arg + 1, 0, &offset);
                if (ret)
                        break;
                if (arg[0] == '-')
                        offset = -offset;
                arg = tmp + 1;
                tmp = strrchr(arg, ')');
                if (tmp) {
                        struct indirect_fetch_data *id;
                        *tmp = '\0';
                        id = kzalloc(sizeof(struct indirect_fetch_data),
                                     GFP_KERNEL);
                        if (!id)
                                return -ENOMEM;
                        id->offset = offset;
                        ret = __parse_probe_arg(arg, &id->orig, is_return);
                        if (ret)
                                kfree(id);
                        else {
                                ff->func = fetch_indirect;
                                ff->data = (void *)id;
                        }
                } else
                        ret = -EINVAL;
                break;
        default:
                /* TODO: support custom handler */
                ret = -EINVAL;
        }
        return ret;
}

/* String length checking wrapper */
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
        if (strlen(arg) > MAX_ARGSTR_LEN) {
                pr_info("Argument is too long: %s\n", arg);
                return -ENOSPC;
        }
        return __parse_probe_arg(arg, ff, is_return);
}

/* Return 1 if name is reserved or already used by another argument */
static int conflict_field_name(const char *name,
                               struct probe_arg *args, int narg)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
                if (strcmp(reserved_field_names[i], name) == 0)
                        return 1;
        for (i = 0; i < narg; i++)
                if (strcmp(args[i].name, name) == 0)
                        return 1;
        return 0;
}

static int create_trace_probe(int argc, char **argv)
{
        /*
         * Argument syntax:
         *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
         * Fetch args:
         *  $argN         : fetch Nth function argument. (N:0-)
         *  $retval       : fetch return value
         *  $stack        : fetch stack address
         *  $stackN       : fetch Nth entry of stack (N:0-)
         *  @ADDR         : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG          : fetch register REG
         * Indirect memory fetch:
         *  +|-offs(ARG)  : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         */
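
        /*
         * For example (written to <debugfs>/tracing/kprobe_events; the
         * symbol and argument layout are illustrative, and register names
         * depend on the architecture):
         *   echo 'p:myprobe do_sys_open dfd=$arg0 flags=%cx' > kprobe_events
         *   echo 'r:myretprobe do_sys_open $retval' >> kprobe_events
         *   echo '-:myprobe' >> kprobe_events
         */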

        struct trace_probe *tp;
        int i, ret = 0;
        int is_return = 0, is_delete = 0;
        char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
        unsigned long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];

        /* argc must be >= 1 */
        if (argv[0][0] == 'p')
                is_return = 0;
        else if (argv[0][0] == 'r')
                is_return = 1;
        else if (argv[0][0] == '-')
                is_delete = 1;
        else {
                pr_info("Probe definition must start with 'p', 'r' or"
                        " '-'.\n");
                return -EINVAL;
        }

        if (argv[0][1] == ':') {
                event = &argv[0][2];
                if (strchr(event, '/')) {
                        group = event;
                        event = strchr(group, '/') + 1;
                        event[-1] = '\0';
                        if (strlen(group) == 0) {
                                pr_info("Group name is not specified\n");
                                return -EINVAL;
                        }
                }
                if (strlen(event) == 0) {
                        pr_info("Event name is not specified\n");
                        return -EINVAL;
                }
        }

        if (!group)
                group = KPROBE_EVENT_SYSTEM;

        if (is_delete) {
                if (!event) {
                        pr_info("Delete command needs an event name.\n");
                        return -EINVAL;
                }
                tp = find_probe_event(event, group);
                if (!tp) {
                        pr_info("Event %s/%s doesn't exist.\n", group, event);
                        return -ENOENT;
                }
                /* delete an event */
                unregister_trace_probe(tp);
                free_trace_probe(tp);
                return 0;
        }

        if (argc < 2) {
                pr_info("Probe point is not specified.\n");
                return -EINVAL;
        }

        if (isdigit(argv[1][0])) {
                if (is_return) {
                        pr_info("Return probe point must be a symbol.\n");
                        return -EINVAL;
                }
                /* an address specified: parse it from argv[1] */
                ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);
                if (ret) {
                        pr_info("Failed to parse address.\n");
                        return ret;
                }
        } else {
                /* a symbol specified */
                symbol = argv[1];
                /* TODO: support .init module functions */
                ret = split_symbol_offset(symbol, &offset);
                if (ret) {
                        pr_info("Failed to parse symbol.\n");
                        return ret;
                }
                if (offset && is_return) {
                        pr_info("Return probe must be used without offset.\n");
                        return -EINVAL;
                }
        }

        argc -= 2; argv += 2;

        /* setup a probe */
        if (!event) {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                event = buf;
        }
        tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
                               is_return);
        if (IS_ERR(tp)) {
                pr_info("Failed to allocate trace_probe.(%d)\n",
                        (int)PTR_ERR(tp));
                return PTR_ERR(tp);
        }

        /* parse arguments */
        ret = 0;
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                /* Parse argument name */
                arg = strchr(argv[i], '=');
                if (arg)
                        *arg++ = '\0';
                else
                        arg = argv[i];

                if (conflict_field_name(argv[i], tp->args, i)) {
                        pr_info("Argument%d name '%s' conflicts with "
                                "another field.\n", i, argv[i]);
                        ret = -EINVAL;
                        goto error;
                }

                tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
                if (!tp->args[i].name) {
                        pr_info("Failed to allocate argument%d name '%s'.\n",
                                i, argv[i]);
                        ret = -ENOMEM;
                        goto error;
                }

                /* Parse fetch argument */
                ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
                if (ret) {
                        pr_info("Parse error at argument%d. (%d)\n", i, ret);
                        kfree(tp->args[i].name);
                        goto error;
                }
                tp->nr_args++;
        }

        ret = register_trace_probe(tp);
        if (ret)
                goto error;
        return 0;

error:
        free_trace_probe(tp);
        return ret;
}

static void cleanup_all_probes(void)
{
        struct trace_probe *tp;

        mutex_lock(&probe_lock);
        /* TODO: Use batch unregistration */
        while (!list_empty(&probe_list)) {
                tp = list_entry(probe_list.next, struct trace_probe, list);
                unregister_trace_probe(tp);
                free_trace_probe(tp);
        }
        mutex_unlock(&probe_lock);
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&probe_lock);
        return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
        mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct trace_probe *tp = v;
        int i, ret;
        char buf[MAX_ARGSTR_LEN + 1];

        seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
        seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);

        if (!tp->symbol)
                seq_printf(m, " 0x%p", tp->rp.kp.addr);
        else if (tp->rp.kp.offset)
                seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
        else
                seq_printf(m, " %s", probe_symbol(tp));

        for (i = 0; i < tp->nr_args; i++) {
                ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
                if (ret < 0) {
                        pr_warning("Argument%d decoding error(%d).\n", i, ret);
                        return ret;
                }
                seq_printf(m, " %s=%s", tp->args[i].name, buf);
        }
        seq_printf(m, "\n");
        return 0;
}

static const struct seq_operations probes_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_seq_show
};
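
/*
 * Opening kprobe_events for writing with O_TRUNC (i.e. 'echo cmd >
 * kprobe_events' rather than '>>') removes all existing probes before any
 * new commands are processed.
 */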
static int probes_open(struct inode *inode, struct file *file)
{
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                cleanup_all_probes();

        return seq_open(file, &probes_seq_op);
}

static int command_trace_probe(const char *buf)
{
        char **argv;
        int argc = 0, ret = 0;

        argv = argv_split(GFP_KERNEL, buf, &argc);
        if (!argv)
                return -ENOMEM;

        if (argc)
                ret = create_trace_probe(argc, argv);

        argv_free(argv);
        return ret;
}

#define WRITE_BUFSIZE 128
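
/*
 * Writes are consumed line by line: one command per newline-terminated
 * line, each at most WRITE_BUFSIZE - 1 bytes, and anything after a '#'
 * on a line is discarded as a comment.
 */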
static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        char *kbuf, *tmp;
        int ret;
        size_t done;
        size_t size;

        kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = done = 0;
        while (done < count) {
                size = count - done;
                if (size >= WRITE_BUFSIZE)
                        size = WRITE_BUFSIZE - 1;
                if (copy_from_user(kbuf, buffer + done, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                kbuf[size] = '\0';
                tmp = strchr(kbuf, '\n');
                if (tmp) {
                        *tmp = '\0';
                        size = tmp - kbuf + 1;
                } else if (done + size < count) {
                        pr_warning("Line length is too long: "
                                   "Should be less than %d.", WRITE_BUFSIZE);
                        ret = -EINVAL;
                        goto out;
                }
                done += size;
                /* Remove comments */
                tmp = strchr(kbuf, '#');
                if (tmp)
                        *tmp = '\0';

                ret = command_trace_probe(kbuf);
                if (ret)
                        goto out;
        }
        ret = done;
out:
        kfree(kbuf);
        return ret;
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct trace_probe *tp = v;

        seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
                   tp->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = probes_seq_start,
        .next   = probes_seq_next,
        .stop   = probes_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct kprobe_trace_entry *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, i, pc;
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;

        tp->nhit++;

        local_save_flags(irq_flags);
        pc = preempt_count();

        size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

        event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
                                                  irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->nargs = tp->nr_args;
        entry->ip = (unsigned long)kp->addr;
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

        if (!filter_current_check_discard(buffer, call, entry, event))
                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
        return 0;
}

/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
                                          struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct kretprobe_trace_entry *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        int size, i, pc;
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;

        local_save_flags(irq_flags);
        pc = preempt_count();

        size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

        event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
                                                  irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->nargs = tp->nr_args;
        entry->func = (unsigned long)tp->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

        if (!filter_current_check_discard(buffer, call, entry, event))
                trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
        return 0;
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
        struct kprobe_trace_entry *field;
        struct trace_seq *s = &iter->seq;
        struct trace_event *event;
        struct trace_probe *tp;
        int i;

        field = (struct kprobe_trace_entry *)iter->ent;
        event = ftrace_find_event(field->ent.type);
        tp = container_of(event, struct trace_probe, event);

        if (!trace_seq_printf(s, "%s: (", tp->call.name))
                goto partial;

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto partial;

        if (!trace_seq_puts(s, ")"))
                goto partial;

        for (i = 0; i < field->nargs; i++)
                if (!trace_seq_printf(s, " %s=%lx",
                                      tp->args[i].name, field->args[i]))
                        goto partial;

        if (!trace_seq_puts(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
        struct kretprobe_trace_entry *field;
        struct trace_seq *s = &iter->seq;
        struct trace_event *event;
        struct trace_probe *tp;
        int i;

        field = (struct kretprobe_trace_entry *)iter->ent;
        event = ftrace_find_event(field->ent.type);
        tp = container_of(event, struct trace_probe, event);

        if (!trace_seq_printf(s, "%s: (", tp->call.name))
                goto partial;

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto partial;

        if (!trace_seq_puts(s, " <- "))
                goto partial;

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto partial;

        if (!trace_seq_puts(s, ")"))
                goto partial;

        for (i = 0; i < field->nargs; i++)
                if (!trace_seq_printf(s, " %s=%lx",
                                      tp->args[i].name, field->args[i]))
                        goto partial;

        if (!trace_seq_puts(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static int probe_event_enable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        tp->flags |= TP_FLAG_TRACE;
        if (probe_is_return(tp))
                return enable_kretprobe(&tp->rp);
        else
                return enable_kprobe(&tp->rp.kp);
}

static void probe_event_disable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        tp->flags &= ~TP_FLAG_TRACE;
        if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
                if (probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
}

static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
        INIT_LIST_HEAD(&event_call->fields);

        return 0;
}

#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)                       \
        do {                                                            \
                ret = trace_define_field(event_call, #type, name,       \
                                         offsetof(typeof(field), item), \
                                         sizeof(field.item), is_signed, \
                                         FILTER_OTHER);                 \
                if (ret)                                                \
                        return ret;                                     \
        } while (0)

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
        int ret, i;
        struct kprobe_trace_entry field;
        struct trace_probe *tp = (struct trace_probe *)event_call->data;

        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
        DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
        /* Set argument names as fields */
        for (i = 0; i < tp->nr_args; i++)
                DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
        return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
        int ret, i;
        struct kretprobe_trace_entry field;
        struct trace_probe *tp = (struct trace_probe *)event_call->data;

        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
        DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
        /* Set argument names as fields */
        for (i = 0; i < tp->nr_args; i++)
                DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
        return 0;
}

static int __probe_event_show_format(struct trace_seq *s,
                                     struct trace_probe *tp, const char *fmt,
                                     const char *arg)
{
        int i;

        /* Show format */
        if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
                return 0;

        for (i = 0; i < tp->nr_args; i++)
                if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
                        return 0;

        if (!trace_seq_printf(s, "\", %s", arg))
                return 0;

        for (i = 0; i < tp->nr_args; i++)
                if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
                        return 0;

        return trace_seq_puts(s, "\n");
}
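
/*
 * For a kprobe event with arguments named "dfd" and "flags" (illustrative),
 * this produces a line like:
 *   print fmt: "(%lx) dfd=%lx flags=%lx", REC->__probe_ip, REC->dfd, REC->flags
 */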

#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)                                    \
        do {                                                            \
                ret = trace_seq_printf(s, "\tfield:" #type " %s;\t"     \
                                "offset:%u;\tsize:%u;\tsigned:%d;\n", name,\
                                (unsigned int)offsetof(typeof(field), item),\
                                (unsigned int)sizeof(type),             \
                                is_signed_type(type));                  \
                if (!ret)                                               \
                        return 0;                                       \
        } while (0)

static int kprobe_event_show_format(struct ftrace_event_call *call,
                                    struct trace_seq *s)
{
        struct kprobe_trace_entry field __attribute__((unused));
        int ret, i;
        struct trace_probe *tp = (struct trace_probe *)call->data;

        SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
        SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

        /* Show fields */
        for (i = 0; i < tp->nr_args; i++)
                SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
        trace_seq_puts(s, "\n");

        return __probe_event_show_format(s, tp, "(%lx)",
                                         "REC->" FIELD_STRING_IP);
}

static int kretprobe_event_show_format(struct ftrace_event_call *call,
                                       struct trace_seq *s)
{
        struct kretprobe_trace_entry field __attribute__((unused));
        int ret, i;
        struct trace_probe *tp = (struct trace_probe *)call->data;

        SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
        SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
        SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);

        /* Show fields */
        for (i = 0; i < tp->nr_args; i++)
                SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
        trace_seq_puts(s, "\n");

        return __probe_event_show_format(s, tp, "(%lx <- %lx)",
                                         "REC->" FIELD_STRING_FUNC
                                         ", REC->" FIELD_STRING_RETIP);
}

#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
                                         struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry *entry;
        struct trace_entry *ent;
        int size, __size, i, pc, __cpu;
        unsigned long irq_flags;
        char *trace_buf;
        char *raw_data;
        int rctx;

        pc = preempt_count();
        __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "profile buffer not large enough"))
                return 0;

        /*
         * Protect the non-NMI buffer.
         * This also protects the RCU read side.
         */
        local_irq_save(irq_flags);

        rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                goto end_recursion;

        __cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, __cpu);

        /* Zero dead bytes from alignment to avoid buffer leak to userspace */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
        entry = (struct kprobe_trace_entry *)raw_data;
        ent = &entry->ent;

        tracing_generic_entry_update(ent, irq_flags, pc);
        ent->type = call->id;
        entry->nargs = tp->nr_args;
        entry->ip = (unsigned long)kp->addr;
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
        perf_tp_event(call->id, entry->ip, 1, entry, size);

end:
        perf_swevent_put_recursion_context(rctx);
end_recursion:
        local_irq_restore(irq_flags);

        return 0;
}

/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
                                            struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry *entry;
        struct trace_entry *ent;
        int size, __size, i, pc, __cpu;
        unsigned long irq_flags;
        char *trace_buf;
        char *raw_data;
        int rctx;

        pc = preempt_count();
        __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
        if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
                      "profile buffer not large enough"))
                return 0;

        /*
         * Protect the non-NMI buffer.
         * This also protects the RCU read side.
         */
        local_irq_save(irq_flags);

        rctx = perf_swevent_get_recursion_context();
        if (rctx < 0)
                goto end_recursion;

        __cpu = smp_processor_id();

        if (in_nmi())
                trace_buf = rcu_dereference(perf_trace_buf_nmi);
        else
                trace_buf = rcu_dereference(perf_trace_buf);

        if (!trace_buf)
                goto end;

        raw_data = per_cpu_ptr(trace_buf, __cpu);

        /* Zero dead bytes from alignment to avoid buffer leak to userspace */
        *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
        entry = (struct kretprobe_trace_entry *)raw_data;
        ent = &entry->ent;

        tracing_generic_entry_update(ent, irq_flags, pc);
        ent->type = call->id;
        entry->nargs = tp->nr_args;
        entry->func = (unsigned long)tp->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        for (i = 0; i < tp->nr_args; i++)
                entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
        perf_tp_event(call->id, entry->ret_ip, 1, entry, size);

end:
        perf_swevent_put_recursion_context(rctx);
end_recursion:
        local_irq_restore(irq_flags);

        return 0;
}

static int probe_profile_enable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        tp->flags |= TP_FLAG_PROFILE;

        if (probe_is_return(tp))
                return enable_kretprobe(&tp->rp);
        else
                return enable_kprobe(&tp->rp.kp);
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
        struct trace_probe *tp = (struct trace_probe *)call->data;

        tp->flags &= ~TP_FLAG_PROFILE;

        if (!(tp->flags & TP_FLAG_TRACE)) {
                if (probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
}
#endif /* CONFIG_EVENT_PROFILE */

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

        if (tp->flags & TP_FLAG_TRACE)
                kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
        if (tp->flags & TP_FLAG_PROFILE)
                kprobe_profile_func(kp, regs);
#endif /* CONFIG_EVENT_PROFILE */
        return 0;       /* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

        if (tp->flags & TP_FLAG_TRACE)
                kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
        if (tp->flags & TP_FLAG_PROFILE)
                kretprobe_profile_func(ri, regs);
#endif /* CONFIG_EVENT_PROFILE */
        return 0;       /* We don't tweak the kernel, so just return 0 */
}

static int register_probe_event(struct trace_probe *tp)
{
        struct ftrace_event_call *call = &tp->call;
        int ret;

        /* Initialize ftrace_event_call */
        if (probe_is_return(tp)) {
                tp->event.trace = print_kretprobe_event;
                call->raw_init = probe_event_raw_init;
                call->show_format = kretprobe_event_show_format;
                call->define_fields = kretprobe_event_define_fields;
        } else {
                tp->event.trace = print_kprobe_event;
                call->raw_init = probe_event_raw_init;
                call->show_format = kprobe_event_show_format;
                call->define_fields = kprobe_event_define_fields;
        }
        call->event = &tp->event;
        call->id = register_ftrace_event(&tp->event);
        if (!call->id)
                return -ENODEV;
        call->enabled = 0;
        call->regfunc = probe_event_enable;
        call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
        call->profile_enable = probe_profile_enable;
        call->profile_disable = probe_profile_disable;
#endif
        call->data = tp;
        ret = trace_add_event_call(call);
        if (ret) {
                pr_info("Failed to register kprobe event: %s\n", call->name);
                unregister_ftrace_event(&tp->event);
        }
        return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
        /* tp->event is unregistered in trace_remove_event_call() */
        trace_remove_event_call(&tp->call);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
                                    NULL, &kprobe_events_ops);

        /* Event list interface */
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'kprobe_events' entry\n");

        /* Profile interface */
        entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
                                    NULL, &kprobe_profile_ops);

        if (!entry)
                pr_warning("Could not create debugfs "
                           "'kprobe_profile' entry\n");
        return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
                                        int a4, int a5, int a6)
{
        return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
        int ret;
        int (*target)(int, int, int, int, int, int);

        target = kprobe_trace_selftest_target;

        pr_info("Testing kprobe tracing: ");

        ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
                                  "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
        if (WARN_ON_ONCE(ret))
                pr_warning("error enabling function entry\n");

        ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
                                  "$retval");
        if (WARN_ON_ONCE(ret))
                pr_warning("error enabling function return\n");

        ret = target(1, 2, 3, 4, 5, 6);

        cleanup_all_probes();

        pr_cont("OK\n");
        return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif