ftrace.c

/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        set_all_modules_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_all_modules_text_ro();
        set_kernel_text_ro();
        return 0;
}

union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

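/*
 * Worked example of the bytes ftrace_call_replace() produces (the
 * addresses are hypothetical, for illustration only): patching the
 * site at ip = 0xffffffff81000100 to call a trampoline at
 * addr = 0xffffffff81000200 gives
 *
 *     offset = addr - (ip + MCOUNT_INSN_SIZE)
 *            = 0xffffffff81000200 - 0xffffffff81000105 = 0xfb
 *
 * so the five patched bytes are: e8 fb 00 00 00 -- a near call with
 * a little-endian rel32, relative to the instruction that follows
 * the call.
 */
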
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping
         * instead of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa(ip));

        return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}

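/*
 * Illustration of the aliasing trick above (addresses hypothetical):
 * if ip sits in the read-only text mapping, say 0xffffffff81000100,
 * __pa(ip) yields its physical address and __va() re-derives the
 * writable direct-mapping alias of the very same bytes (e.g.
 * 0xffff880001000100 with the classic x86_64 layout). The write thus
 * lands in the same physical page without ever touching the RO text
 * mapping.
 */
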
static const unsigned char *ftrace_nop_replace(void)
{
        return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                          unsigned const char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: due to modules and __init, code can disappear and
         * change; we need to protect against faulting as well as code
         * changing. We do this by using the probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        sync_core();

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(rec->ip, old, new);

        /* Normal cases use add_brk_on_nop */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. It is also assumed
 * that a CPU can not read modifying_ftrace_code before
 * executing the breakpoint; it would be quite remarkable if
 * it could do that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 *   atomic_inc(mfc);
 *   write int3s
 *                                  <trap-int3> // implicit (r)mb
 *                                  if (atomic_read(mfc))
 *                                          call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *   atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that an ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location; it must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code);

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/*
 * Should never be called:
 * It is only called by __ftrace_replace_code(), which is called by
 * ftrace_replace_code() (which x86 overrides) and by
 * ftrace_update_code(), which turns mcount calls into nops or nops
 * into function calls, but never converts a function from the
 * non-regs callback to the regs-saving one. That conversion is what
 * ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        WARN_ON(1);
        return -EINVAL;
}
#endif

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char old[MCOUNT_INSN_SIZE], *new;
        int ret;

        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);

        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ret = ftrace_modify_code(ip, old, new);

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
        /* Also update the regs callback function */
        if (!ret) {
                ip = (unsigned long)(&ftrace_regs_call);
                memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
                new = ftrace_call_replace(ip, (unsigned long)func);
                ret = ftrace_modify_code(ip, old, new);
        }
#endif

        atomic_dec(&modifying_ftrace_code);

        return ret;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
        if (WARN_ON_ONCE(!regs))
                return 0;

        if (!ftrace_location(regs->ip - 1))
                return 0;

        regs->ip += MCOUNT_INSN_SIZE - 1;

        return 1;
}

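/*
 * Arithmetic note on the handler above: the int3 trap reports
 * regs->ip as the address *after* the one-byte breakpoint, so
 * regs->ip - 1 is the patched site itself (which is what
 * ftrace_location() expects). Adding MCOUNT_INSN_SIZE - 1 then steps
 * over the remaining four bytes of the 5-byte instruction being
 * rewritten, emulating a nop.
 */
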
static int ftrace_write(unsigned long ip, const char *val, int size)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping
         * instead of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa(ip));

        return probe_kernel_write((void *)ip, val, size);
}

static int add_break(unsigned long ip, const char *old)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;

        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        if (ftrace_write(ip, &brk, 1))
                return -EPERM;

        return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);

        return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
        unsigned const char *old;

        old = ftrace_nop_replace();

        return add_break(rec->ip, old);
}

/*
 * If the record has the FTRACE_FL_REGS flag set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 */
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_REGS)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
                return (unsigned long)FTRACE_ADDR;
}

/*
 * The FTRACE_FL_REGS_EN flag is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 */
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_REGS_EN)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
                return (unsigned long)FTRACE_ADDR;
}

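/*
 * State summary of the two helpers above (FL = FTRACE_FL_):
 *
 *   FL_REGS  FL_REGS_EN   get_ftrace_addr()   get_ftrace_old_addr()
 *   -------  ----------   -----------------   ---------------------
 *      0         0        FTRACE_ADDR         FTRACE_ADDR
 *      1         0        FTRACE_REGS_ADDR    FTRACE_ADDR
 *      0         1        FTRACE_ADDR         FTRACE_REGS_ADDR
 *      1         1        FTRACE_REGS_ADDR    FTRACE_REGS_ADDR
 *
 * i.e. the '_EN' flag reflects what the site calls now, the plain
 * FL_REGS flag what it should call after the update.
 */
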
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_brk_on_nop(rec);

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
                ftrace_addr = get_ftrace_old_addr(rec);
                /* fall through */
        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_brk_on_call(rec, ftrace_addr);
        }
        return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
        unsigned char ins[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;
        const unsigned char *nop;
        unsigned long ftrace_addr;
        unsigned long ip = rec->ip;

        /* If we fail the read, just give up */
        if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* If this does not have a breakpoint, we are done */
        if (ins[0] != brk)
                return -1;

        nop = ftrace_nop_replace();

        /*
         * If the last 4 bytes of the instruction do not match
         * a nop, then we assume that this is a call to ftrace_addr.
         */
        if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
                /*
                 * To be extra paranoid, we check whether the breakpoint
                 * sits on a call that would actually jump to ftrace_addr.
                 * If not, don't touch the breakpoint; we might just
                 * create a disaster.
                 */
                ftrace_addr = get_ftrace_addr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
                        goto update;

                /* Check both ftrace_addr and ftrace_old_addr */
                ftrace_addr = get_ftrace_old_addr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
                        return -EINVAL;
        }

 update:
        return probe_kernel_write((void *)ip, &nop[0], 1);
}

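/*
 * Note that remove_breakpoint() writes back only the first byte: the
 * four tail bytes already match either the nop or one of the verified
 * calls, so restoring byte 0 is enough to leave a valid instruction.
 */
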
static int add_update_code(unsigned long ip, unsigned const char *new)
{
        /* skip breakpoint */
        ip++;
        new++;

        if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
                return -EPERM;

        return 0;
}

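/*
 * Writing the four tail bytes while byte 0 is still an int3 is safe:
 * any CPU that reaches the site traps on the first byte and never
 * decodes the (momentarily inconsistent) tail.
 */
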
static int add_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_update_nop(rec);
        }

        return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);

        if (ftrace_write(ip, new, 1))
                return -EPERM;

        return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();

        if (ftrace_write(ip, new, 1))
                return -EPERM;

        return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_update_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return finish_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return finish_update_nop(rec);
        }

        return 0;
}

static void do_sync_core(void *data)
{
        sync_core();
}

static void run_sync(void)
{
        int enable_irqs = irqs_disabled();

        /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
        if (enable_irqs)
                local_irq_disable();
}

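/*
 * The live-patching protocol below, byte by byte (a sketch; the
 * bytes shown are for the nop -> call direction):
 *
 *   original:   <5-byte nop>
 *   phase 1:    cc <tail of nop>      add_breakpoints() + run_sync()
 *   phase 2:    cc <rel32 of call>    add_update()      + run_sync()
 *   phase 3:    e8 <rel32 of call>    finish_update()   + run_sync()
 *
 * The int3 (0xcc) makes any CPU that executes the site mid-update
 * trap into ftrace_int3_handler(), which skips the instruction, so
 * no CPU can ever see a half-written mix of nop and call bytes.
 */
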
void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *report = "adding breakpoints";
        int count = 0;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_breakpoints(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        report = "updating code";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        report = "removing breakpoints";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = finish_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        return;

 remove_breakpoints:
        ftrace_bug(ret, rec ? rec->ip : 0);
        printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
                remove_breakpoint(rec);
        }
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
{
        int ret;

        ret = add_break(ip, old_code);
        if (ret)
                goto out;

        run_sync();

        ret = add_update_code(ip, new_code);
        if (ret)
                goto fail_update;

        run_sync();

        ret = ftrace_write(ip, new_code, 1);
        if (ret) {
                ret = -EPERM;
                goto out;
        }
        run_sync();
 out:
        return ret;

 fail_update:
        probe_kernel_write((void *)ip, &old_code[0], 1);
        goto out;
}

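/*
 * ftrace_modify_code() is the single-site variant of the batch
 * protocol in ftrace_replace_code(): breakpoint, patch the tail,
 * restore the first byte, with a run_sync() after every step.
 */
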
void arch_ftrace_update_code(int command)
{
        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ftrace_modify_all_code(command);

        atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
                          int old_offset, int new_offset)
{
        unsigned char code[MCOUNT_INSN_SIZE];

        if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
                return -EINVAL;

        *(int *)(&code[1]) = new_offset;

        if (do_ftrace_mod_code(ip, &code))
                return -EPERM;

        return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

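/*
 * For example (hypothetical layout): with ftrace_graph_call at
 * 0xffffffff81000000 and ftrace_graph_caller at 0xffffffff81000040,
 * enabling rewrites "e9 <rel32 to ftrace_stub>" into "e9 3b 00 00 00"
 * (0x40 - 0x5 = 0x3b), and disabling swaps the two offsets back.
 * Only the rel32 is patched; the e9 (jmp) opcode byte never changes,
 * so the site is always a valid jump.
 */
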
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against faults, even though they shouldn't
         * happen. This tool is too intrusive to go without
         * such protection.
         */
        asm volatile(
                "1: " _ASM_MOV " (%[parent]), %[old]\n"
                "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
                "   movl $0, %[faulted]\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: movl $1, %[faulted]\n"
                "   jmp 3b\n"
                ".previous\n"

                _ASM_EXTABLE(1b, 4b)
                _ASM_EXTABLE(2b, 4b)

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
                                     frame_pointer) == -EBUSY) {
                *parent = old;
                return;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */