/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
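/*
 * Build the branch instruction that will live at the call site: a 'bl'
 * to addr when link is set, otherwise a plain 'b'.  create_branch()
 * returns 0 if addr cannot be reached with a 24-bit relative branch.
 */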
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
        unsigned int op;

        addr = ppc_function_entry((void *)addr);

        /* if (link) set op to 'bl' else 'b' */
        op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

        return op;
}
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
        unsigned int replaced;

        /*
         * Note: Due to modules and __init, code can
         * disappear and change, we need to protect against faulting
         * as well as code changing. We do this by using the
         * probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (replaced != old)
                return -EINVAL;

        /* replace the text with the new text */
        if (patch_instruction((unsigned int *)ip, new))
                return -EPERM;

        return 0;
}
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
        /* use the create_branch to verify that this offset can be branched */
        return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES
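/*
 * A 'bl' is primary opcode 18 (0x48000000) with the LK bit set and the
 * AA bit clear; the 0xfc000003 mask drops the 24-bit displacement so
 * only the opcode and the AA/LK bits are compared.
 */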
static int is_bl_op(unsigned int op)
{
        return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
        static int offset;

        offset = (op & 0x03fffffc);
        /* make it signed */
        if (offset & 0x02000000)
                offset |= 0xfe000000;

        return ip + (long)offset;
}

#ifdef CONFIG_PPC64
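/*
 * On PPC64 a module's mcount call goes through a per-module trampoline
 * that loads the real destination out of the module's TOC.  Verify that
 * the 'bl' at the call site targets such a trampoline and that the
 * trampoline resolves to 'addr' before patching the call site out.
 */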
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned int jmp[5];
        unsigned long ptr;
        unsigned long ip = rec->ip;
        unsigned long tramp;
        int offset;

        /* read where this goes */
        if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
                return -EFAULT;

        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
                printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        /*
         * On PPC64 the trampoline looks like:
         *  0x3d, 0x82, 0x00, 0x00,   addis   r12,r2, <high>
         *  0x39, 0x8c, 0x00, 0x00,   addi    r12,r12, <low>
         *    Where the bytes 2,3,6 and 7 make up the 32bit offset
         *    into the TOC that holds the pointer to jump to.
         *  0xf8, 0x41, 0x00, 0x28,   std     r2,40(r1)
         *  0xe9, 0x6c, 0x00, 0x20,   ld      r11,32(r12)
         *    The actual address is 32 bytes from the offset
         *    into the TOC.
         *  0xe8, 0x4c, 0x00, 0x28,   ld      r2,40(r12)
         */

        pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
                printk(KERN_ERR "Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x", jmp[0], jmp[1]);

        /* verify that this is what we expect it to be */
        if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0xf8410028) ||
            (jmp[3] != 0xe96c0020) ||
            (jmp[4] != 0xe84c0028)) {
                printk(KERN_ERR "Not a trampoline\n");
                return -EINVAL;
        }

        /* The bottom half is sign extended */
        offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
                 (int)((short)jmp[1]);

        pr_devel(" %x ", offset);

        /* get the address this jumps to */
        tramp = mod->arch.toc + offset + 32;
        pr_devel("toc: %lx", tramp);

        if (probe_kernel_read(jmp, (void *)tramp, 8)) {
                printk(KERN_ERR "Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

        ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

        /* This should match what was called */
        if (ptr != ppc_function_entry((void *)addr)) {
                printk(KERN_ERR "addr does not match %lx\n", ptr);
                return -EINVAL;
        }

        /*
         * We want to nop the line, but the next line is
         *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
         * This needs to be turned to a nop too.
         */
        if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (op != 0xe8410028) {
                printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
                return -EINVAL;
        }

        /*
         * Milton Miller pointed out that we can not blindly do nops.
         * If a task was preempted when calling a trace function,
         * the nops will remove the way to restore the TOC in r2
         * and the r2 TOC will get corrupted.
         */

        /*
         * Replace:
         *   bl <tramp>  <==== will be replaced with "b 1f"
         *   ld r2,40(r1)
         *   1:
         */
        op = 0x48000008;        /* b +8 */

        if (patch_instruction((unsigned int *)ip, op))
                return -EPERM;

        return 0;
}

#else /* !PPC64 */
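/*
 * On PPC32 the module trampoline is a simple lis/addi/mtctr/bctr sequence
 * carrying the absolute destination, so it is enough to check that the
 * trampoline targets 'addr' and then nop out the call site directly.
 */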
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned int jmp[4];
        unsigned long ip = rec->ip;
        unsigned long tramp;

        if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
                printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        /*
         * On PPC32 the trampoline looks like:
         *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
         *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
         *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
         *  0x4e, 0x80, 0x04, 0x20  bctr
         */

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
                printk(KERN_ERR "Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x ", jmp[0], jmp[1]);

        /* verify that this is what we expect it to be */
        if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
                printk(KERN_ERR "Not a trampoline\n");
                return -EINVAL;
        }

        tramp = (jmp[1] & 0xffff) |
                ((jmp[0] & 0xffff) << 16);
        if (tramp & 0x8000)
                tramp -= 0x10000;

        pr_devel(" %lx ", tramp);

        if (tramp != addr) {
                printk(KERN_ERR
                       "Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }

        op = PPC_INST_NOP;

        if (patch_instruction((unsigned int *)ip, op))
                return -EPERM;

        return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned int old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = ftrace_call_replace(ip, addr, 1);
                new = PPC_INST_NOP;
                return ftrace_modify_code(ip, old, new);
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * We should either already have a pointer to the module
         * or it has been passed in.
         */
        if (!rec->arch.mod) {
                if (!mod) {
                        printk(KERN_ERR "No module loaded addr=%lx\n",
                               addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
                        printk(KERN_ERR
                               "Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
                /* nothing to do if mod == rec->arch.mod */
        } else
                mod = rec->arch.mod;

        return __ftrace_make_nop(mod, rec, addr);

#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
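/*
 * Re-enabling a module call site on PPC64 means writing two instructions:
 * the 'bl' to the module's ftrace trampoline and the 'ld r2,40(r1)' that
 * restores the TOC after the call.  Both are written with one
 * probe_kernel_write() and the icache is flushed over the pair.
 */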
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op[2];
        unsigned long ip = rec->ip;

        /* read where this goes */
        if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
                return -EFAULT;

        /*
         * It should be pointing to two nops or
         *   b +8; ld r2,40(r1)
         */
        if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
            ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
                printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                printk(KERN_ERR "No ftrace trampoline\n");
                return -EINVAL;
        }

        /* create the branch to the trampoline */
        op[0] = create_branch((unsigned int *)ip,
                              rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op[0]) {
                printk(KERN_ERR "REL24 out of range!\n");
                return -EINVAL;
        }

        /* ld r2,40(r1) */
        op[1] = 0xe8410028;

        pr_devel("write to %lx\n", rec->ip);

        if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
                return -EPERM;

        flush_icache_range(ip, ip + 8);

        return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned long ip = rec->ip;

        /* read where this goes */
        if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* It should be pointing to a nop */
        if (op != PPC_INST_NOP) {
                printk(KERN_ERR "Expected NOP but have %x\n", op);
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                printk(KERN_ERR "No ftrace trampoline\n");
                return -EINVAL;
        }

        /* create the branch to the trampoline */
        op = create_branch((unsigned int *)ip,
                           rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op) {
                printk(KERN_ERR "REL24 out of range!\n");
                return -EINVAL;
        }

        pr_devel("write to %lx\n", rec->ip);

        if (patch_instruction((unsigned int *)ip, op))
                return -EPERM;

        return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned int old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = PPC_INST_NOP;
                new = ftrace_call_replace(ip, addr, 1);
                return ftrace_modify_code(ip, old, new);
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * Being that we are converting from nop, it had better
         * already have a module defined.
         */
        if (!rec->arch.mod) {
                printk(KERN_ERR "No module loaded\n");
                return -EINVAL;
        }

        return __ftrace_make_call(rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned int old, new;
        int ret;

        old = *(unsigned int *)&ftrace_call;
        new = ftrace_call_replace(ip, (unsigned long)func, 1);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
        int ret;

        ret = ftrace_update_record(rec, enable);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;
        case FTRACE_UPDATE_MAKE_CALL:
                return ftrace_make_call(rec, ftrace_addr);
        case FTRACE_UPDATE_MAKE_NOP:
                return ftrace_make_nop(NULL, rec, ftrace_addr);
        }

        return 0;
}

void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        int ret;

        for (iter = ftrace_rec_iter_start(); iter;
             iter = ftrace_rec_iter_next(iter)) {
                rec = ftrace_rec_iter_record(iter);
                ret = __ftrace_replace_code(rec, enable);
                if (ret) {
                        ftrace_bug(ret, rec->ip);
                        return;
                }
        }
}
void arch_ftrace_update_code(int command)
{
        if (command & FTRACE_UPDATE_CALLS)
                ftrace_replace_code(1);
        else if (command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* caller expects data to be zero */
        unsigned long *p = data;

        *p = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
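/*
 * The function graph tracer is switched on and off by repointing the
 * unconditional branch at ftrace_graph_call (in the ftrace_caller
 * assembly) between ftrace_graph_caller and the ftrace_graph_stub
 * fall-through label.
 */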
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        unsigned int old, new;

        old = ftrace_call_replace(ip, stub, 0);
        new = ftrace_call_replace(ip, addr, 0);

        return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        unsigned int old, new;

        old = ftrace_call_replace(ip, addr, 0);
        new = ftrace_call_replace(ip, stub, 0);

        return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)&return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

#ifdef CONFIG_PPC64
        /* non core kernel code needs to save and restore the TOC */
        if (REGION_ID(self_addr) != KERNEL_REGION_ID)
                return_hooker = (unsigned long)&mod_return_to_handler;
#endif

        return_hooker = ppc_function_entry((void *)return_hooker);

        /*
         * Protect against fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
        asm volatile(
                "1: " PPC_LL "%[old], 0(%[parent])\n"
                "2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
                "   li %[faulted], 0\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: li %[faulted], 1\n"
                "   b 3b\n"
                ".previous\n"

                ".section __ex_table,\"a\"\n"
                        PPC_LONG_ALIGN "\n"
                        PPC_LONG "1b,4b\n"
                        PPC_LONG "2b,4b\n"
                ".previous"

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
                *parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
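/*
 * The PPC64 syscall table carries two entries per syscall number (the
 * native 64-bit entry followed by the 32-bit compat entry), hence the
 * index is scaled by two below to pick the native entry.
 */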
unsigned long __init arch_syscall_addr(int nr)
{
        return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */