ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE
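
/*
 * Kernel (and module) text may be mapped read-only (CONFIG_DEBUG_RODATA);
 * make it writable while the instructions are being patched and restore
 * the protection when the update is done.
 */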
int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        set_all_modules_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_all_modules_text_ro();
        set_kernel_text_ro();
        return 0;
}
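
/*
 * An mcount call site is MCOUNT_INSN_SIZE (5 bytes on x86): the 0xe8
 * near-call opcode followed by a 32-bit displacement relative to the
 * next instruction. The union below lets us build that instruction
 * in place.
 */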
union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8 = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}
static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
         * of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa(ip));

        return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}
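
/*
 * The 5-byte nop used when a call site is disabled: ideal_nops[NOP_ATOMIC5]
 * is the single-instruction nop the nop-selection code picked as best for
 * this CPU.
 */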
static const unsigned char *ftrace_nop_replace(void)
{
        return ideal_nops[NOP_ATOMIC5];
}

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                          unsigned const char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules and __init, code can
         * disappear and change, we need to protect against faulting
         * as well as code changing. We do this by using the
         * probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        sync_core();

        return 0;
}
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(rec->ip, old, new);

        /* Normal cases use add_brk_on_nop */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}
/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. Nor can a CPU read
 * modifying_ftrace_code before it executes the breakpoint; it
 * would be quite remarkable if it could do that. Here's the
 * flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 *   atomic_inc(mfc);
 *   write int3s
 *                                  <trap-int3> // implicit (r)mb
 *                                  if (atomic_read(mfc))
 *                                          call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 *   atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code);

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char old[MCOUNT_INSN_SIZE], *new;
        int ret;

        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);

        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ret = ftrace_modify_code(ip, old, new);

        atomic_dec(&modifying_ftrace_code);

        return ret;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
        if (WARN_ON_ONCE(!regs))
                return 0;

        if (!ftrace_location(regs->ip - 1))
                return 0;
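        /*
         * regs->ip points just past the int3 byte; skip the remaining
         * MCOUNT_INSN_SIZE - 1 bytes so the patched site behaves like a nop.
         */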
        regs->ip += MCOUNT_INSN_SIZE - 1;

        return 1;
}

static int ftrace_write(unsigned long ip, const char *val, int size)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
         * of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa(ip));

        return probe_kernel_write((void *)ip, val, size);
}
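
/*
 * Verify that the site still holds the expected instruction, then write a
 * single int3 (BREAKPOINT_INSTRUCTION) over its first byte so that any CPU
 * executing it traps into ftrace_int3_handler() instead of running a
 * half-modified instruction.
 */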
static int add_break(unsigned long ip, const char *old)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;

        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        if (ftrace_write(ip, &brk, 1))
                return -EPERM;

        return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);

        return add_break(rec->ip, old);
}

static int add_brk_on_nop(struct dyn_ftrace *rec)
{
        unsigned const char *old;

        old = ftrace_nop_replace();

        return add_break(rec->ip, old);
}
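
/*
 * Step one of an update: ask ftrace_test_record() what this record needs
 * (ignore, nop -> call, or call -> nop) and, if it is changing, place a
 * breakpoint on the site.
 */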
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_brk_on_nop(rec);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_brk_on_call(rec, ftrace_addr);
        }
        return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
        unsigned char ins[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;
        const unsigned char *nop;
        unsigned long ftrace_addr;
        unsigned long ip = rec->ip;

        /* If we fail the read, just give up */
        if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* If this does not have a breakpoint, we are done */
        if (ins[0] != brk)
                return -1;

        nop = ftrace_nop_replace();

        /*
         * If the last 4 bytes of the instruction do not match
         * a nop, then we assume that this is a call to ftrace_addr.
         */
        if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
                /*
                 * To be extra paranoid, we check whether the breakpoint
                 * is on a call that would actually jump to the ftrace_addr.
                 * If not, don't touch the breakpoint; we may just create
                 * a disaster.
                 */
                ftrace_addr = (unsigned long)FTRACE_ADDR;
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
                        return -EINVAL;
        }

        return probe_kernel_write((void *)ip, &nop[0], 1);
}
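
/*
 * Step two: with the breakpoint still in place, write the last four bytes
 * of the new instruction behind it. The first byte stays int3 until the
 * final step, so no CPU can execute a half-written instruction.
 */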
static int add_update_code(unsigned long ip, unsigned const char *new)
{
        /* skip breakpoint */
        ip++;
        new++;

        if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
                return -EPERM;

        return 0;
}

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);
        return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();
        return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_update_nop(rec);
        }

        return 0;
}
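
/*
 * Step three: replace the breakpoint byte with the first byte of the new
 * instruction, making the fully updated call or nop visible.
 */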
static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);

        if (ftrace_write(ip, new, 1))
                return -EPERM;

        return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();

        if (ftrace_write(ip, new, 1))
                return -EPERM;

        return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_update_record(rec, enable);

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return finish_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return finish_update_nop(rec);
        }

        return 0;
}
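
/*
 * Each phase above must be visible on every CPU before the next one starts.
 * run_sync() cross-calls all CPUs and executes a serializing sync_core()
 * on each of them. on_each_cpu() needs interrupts enabled, so run_sync()
 * enables them temporarily if it was called with them disabled (e.g. at
 * boot).
 */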
static void do_sync_core(void *data)
{
        sync_core();
}

static void run_sync(void)
{
        int enable_irqs = irqs_disabled();

        /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
        if (enable_irqs)
                local_irq_disable();
}
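
/*
 * ftrace_replace_code() converts every registered mcount call site in three
 * passes, with a run_sync() after each one:
 *
 *   1) add_breakpoints() puts an int3 on the first byte of every site that
 *      is changing, so a CPU that hits it is diverted by
 *      ftrace_int3_handler() instead of executing stale code;
 *   2) add_update() writes the last four bytes of the new instruction
 *      behind the breakpoint;
 *   3) finish_update() replaces the int3 with the new first byte.
 *
 * If any step fails, the breakpoints added so far are removed again.
 */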
void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *report = "adding breakpoints";
        int count = 0;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_breakpoints(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        report = "updating code";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        report = "removing breakpoints";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = finish_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        return;

 remove_breakpoints:
        ftrace_bug(ret, rec ? rec->ip : 0);
        printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
                remove_breakpoint(rec);
        }
}
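
/*
 * The same breakpoint sequence, applied to a single location. This is what
 * ftrace_update_ftrace_func() uses to redirect the ftrace_call site itself.
 */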
static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
{
        int ret;

        ret = add_break(ip, old_code);
        if (ret)
                goto out;

        run_sync();

        ret = add_update_code(ip, new_code);
        if (ret)
                goto fail_update;

        run_sync();

        ret = ftrace_write(ip, new_code, 1);
        if (ret) {
                ret = -EPERM;
                goto out;
        }
        run_sync();
 out:
        return ret;

 fail_update:
        probe_kernel_write((void *)ip, &old_code[0], 1);
        goto out;
}
void arch_ftrace_update_code(int command)
{
        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ftrace_modify_all_code(command);

        atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
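
/*
 * Patch the 32-bit offset of an existing jmp (0xe9) after checking that
 * the site currently holds old_offset. The enable/disable helpers below
 * use this to point the jmp at ftrace_graph_call toward either
 * ftrace_graph_caller or ftrace_stub.
 */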
static int ftrace_mod_jmp(unsigned long ip,
                          int old_offset, int new_offset)
{
        unsigned char code[MCOUNT_INSN_SIZE];

        if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
                return -EINVAL;

        *(int *)(&code[1]) = new_offset;

        if (do_ftrace_mod_code(ip, &code))
                return -EPERM;

        return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against a fault, even though it shouldn't
         * happen. This tool is too intrusive to skip such a
         * protection.
         */
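        /*
         * Read the caller's return address from *parent and replace it
         * with return_to_handler. A fault in either access is caught by
         * the exception table entries below and reported via 'faulted'.
         */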
        asm volatile(
                "1: " _ASM_MOV " (%[parent]), %[old]\n"
                "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
                "   movl $0, %[faulted]\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: movl $1, %[faulted]\n"
                "   jmp 3b\n"
                ".previous\n"

                _ASM_EXTABLE(1b, 4b)
                _ASM_EXTABLE(2b, 4b)

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
                                     frame_pointer) == -EBUSY) {
                *parent = old;
                return;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */