ftrace.c

/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)	do { } while (0)
#endif
static unsigned int ftrace_nop = PPC_NOP_INSTR;

#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/*
 * PowerPC64 function symbols point to function descriptors; the first
 * doubleword of the descriptor is the actual entry point.
 */
# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif
static unsigned int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

static unsigned char *ftrace_nop_replace(void)
{
	return (char *)&ftrace_nop;
}
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static unsigned int op;

	/*
	 * It would be nice to just use create_function_call, but that will
	 * update the code itself. Here we need to just return the
	 * instruction that is going to be modified, without modifying the
	 * code.
	 */
	addr = GET_ADDR(addr);

	/* Set to "bl addr" */
	op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc);
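	/*
	 * Encoding note: 0x48000001 is the I-form branch (primary opcode 18)
	 * with AA=0 and LK=1, i.e. "bl" -- a PC-relative call that saves the
	 * return address in LR. Bits 0x03fffffc carry the signed 24-bit word
	 * offset, which is why the target must be within +/- 32 MB of the
	 * call site.
	 *
	 * Worked example with made-up addresses: if addr - ip == 0x40, the
	 * result is 0x48000001 | 0x40 == 0x48000041, i.e. "bl +0x40".
	 */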
	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return (unsigned char *)&op;
}
#ifdef CONFIG_PPC64
# define _ASM_ALIGN	" .align 3 "
# define _ASM_PTR	" .llong "
#else
# define _ASM_ALIGN	" .align 2 "
# define _ASM_PTR	" .long "
#endif
static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and change;
	 * we need to protect against faulting as well as code changing.
	 * We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	/* use the create_branch to verify that this offset can be branched */
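	/*
	 * create_branch() returns 0 when the displacement cannot be encoded
	 * in a relative branch, so a zero result here means "out of range,
	 * use a trampoline instead".
	 */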
	return create_branch((unsigned int *)ip, addr, 0);
}
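/*
 * Match any "bl": mask 0xfc000003 keeps the primary opcode plus the AA and
 * LK bits, and 0x48000001 is opcode 18 with AA=0, LK=1, regardless of the
 * branch displacement.
 */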
static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}
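/*
 * Recover the branch target: take the 26-bit byte displacement out of the
 * instruction, sign-extend it, and add it to the address of the branch
 * itself.
 */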
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	static int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);
	/*
	 * On PPC64 the trampoline looks like:
	 * 0x3d, 0x82, 0x00, 0x00,	addis	r12,r2, <high>
	 * 0x39, 0x8c, 0x00, 0x00,	addi	r12,r12, <low>
	 *   Where bytes 2,3 and 6,7 make up the 32bit offset into the
	 *   TOC that holds the pointer to jump to.
	 * 0xf8, 0x41, 0x00, 0x28,	std	r2,40(r1)
	 * 0xe9, 0x6c, 0x00, 0x20,	ld	r11,32(r12)
	 *   The actual address is 32 bytes from that offset into the TOC.
	 * 0xe8, 0x4c, 0x00, 0x28,	ld	r2,40(r12)
	 */
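	/*
	 * In other words, the 64-bit entry address that the trampoline ends
	 * up jumping to is stored at mod->arch.toc + <32-bit offset> + 32;
	 * that is the location reconstructed and read back below.
	 */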
	DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	DEBUGP(" %08x %08x", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0xf8410028) ||
	    (jmp[3] != 0xe96c0020) ||
	    (jmp[4] != 0xe84c0028)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	offset = (unsigned)((unsigned short)jmp[0]) << 16 |
		(unsigned)((unsigned short)jmp[1]);

	DEBUGP(" %x ", offset);

	/* get the address this jumps to */
	tramp = mod->arch.toc + offset + 32;
	DEBUGP("toc: %lx", tramp);

	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	DEBUGP(" %08x %08x\n", jmp[0], jmp[1]);

	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

	/* This should match what was called */
	if (ptr != GET_ADDR(addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}

	/*
	 * We want to nop the line, but the next line is
	 *  0xe8, 0x41, 0x00, 0x28	ld r2,40(r1)
	 * This needs to be turned to a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we can not blindly do nops.
	 * If a task was preempted when calling a trace function,
	 * the nops will remove the way to restore the TOC in r2
	 * and the r2 TOC will get corrupted.
	 */

	/*
	 * Replace:
	 *   bl <tramp>  <==== will be replaced with "b 1f"
	 *   ld r2,40(r1)
	 * 1:
	 */
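	/*
	 * 0x48000008 is an unconditional relative branch (opcode 18, AA=0,
	 * LK=0) with a displacement of 8 bytes: execution skips over the
	 * following "ld r2,40(r1)", while the ld itself stays in place for
	 * any task that was preempted inside the old call and still needs
	 * to restore r2 on its way back.
	 */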
	op = 0x48000008;	/* b +8 */

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 * 0x3d, 0x60, 0x00, 0x00	lis	r11,sym@ha
	 * 0x39, 0x6b, 0x00, 0x00	addi	r11,r11,sym@l
	 * 0x7d, 0x69, 0x03, 0xa6	mtctr	r11
	 * 0x4e, 0x80, 0x04, 0x20	bctr
	 */
	DEBUGP("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	DEBUGP(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
	    ((jmp[1] & 0xffff0000) != 0x396b0000) ||
	    (jmp[2] != 0x7d6903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}
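	/*
	 * Rebuild sym from the lis/addi immediates. The addi immediate
	 * (sym@l) is sign-extended at run time, so when its high bit is set
	 * the sym@ha half was rounded up by one; subtracting 0x10000 undoes
	 * that rounding and yields (ha << 16) + (signed)lo.
	 */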
	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	DEBUGP(" %x ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_NOP_INSTR;

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif /* PPC64 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *old, *new;
	unsigned long ip = rec->ip;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr);
		new = ftrace_nop_replace();
		return ftrace_modify_code(ip, old, new);
	}

	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
}
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 *    b +8; ld r2,40(r1)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}
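	/*
	 * A call through the module trampoline switches r2 to the callee's
	 * TOC (the stub first saves the caller's TOC at 40(r1)), so the
	 * second patched instruction restores r2 from that save slot on
	 * return.
	 */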
	/* ld r2,40(r1) */
	op[1] = 0xe8410028;

	DEBUGP("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_NOP_INSTR) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	DEBUGP("write to %lx\n", rec->ip);

	if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}
#endif /* CONFIG_PPC64 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *old, *new;
	unsigned long ip = rec->ip;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_nop_replace();
		new = ftrace_call_replace(ip, addr);
		return ftrace_modify_code(ip, old, new);
	}

	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from a nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
}
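/*
 * ftrace_call is a label on the call instruction inside the ftrace_caller
 * assembly stub; patching the call at that label switches which tracer
 * function gets invoked from the traced mcount sites.
 */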
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}
int __init ftrace_dyn_arch_init(void *data)
{
	/* caller expects data to be zero */
	unsigned long *p = data;

	*p = 0;

	return 0;
}