/* arch/arm/kernel/ftrace.c */
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched lazily with NOP till they are
 * enabled. All code mutation routines here take effect atomically.
 */
  13. #include <linux/ftrace.h>
  14. #include <asm/cacheflush.h>
  15. #define INSN_SIZE 4
  16. #define PC_OFFSET 8
  17. #define BL_OPCODE 0xeb000000
  18. #define BL_OFFSET_MASK 0x00ffffff
  19. static unsigned long bl_insn;
  20. static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
  21. /* return true if mcount call site is already patched/no-op'ed */
  22. int ftrace_ip_converted(unsigned long pc)
  23. {
  24. unsigned long save;
  25. pc -= INSN_SIZE;
  26. save = *(unsigned long *)pc;
  27. return save == NOP;
  28. }
  29. unsigned char *ftrace_nop_replace(void)
  30. {
  31. return (char *)&NOP;
  32. }
  33. /* construct a branch (BL) instruction to addr */
  34. unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
  35. {
  36. long offset;
  37. offset = (long)addr - (long)(pc - INSN_SIZE + PC_OFFSET);
  38. if (unlikely(offset < -33554432 || offset > 33554428)) {
  39. /* Can't generate branches that far (from ARM ARM). Ftrace
  40. * doesn't generate branches outside of core kernel text.
  41. */
  42. WARN_ON_ONCE(1);
  43. return NULL;
  44. }
  45. offset = (offset >> 2) & BL_OFFSET_MASK;
  46. bl_insn = BL_OPCODE | offset;
  47. return (unsigned char *)&bl_insn;
  48. }
  49. int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
  50. unsigned char *new_code)
  51. {
  52. unsigned long err = 0, replaced = 0, old, new;
  53. old = *(unsigned long *)old_code;
  54. new = *(unsigned long *)new_code;
  55. pc -= INSN_SIZE;
  56. __asm__ __volatile__ (
  57. "1: ldr %1, [%2] \n"
  58. " cmp %1, %4 \n"
  59. "2: streq %3, [%2] \n"
  60. " cmpne %1, %3 \n"
  61. " movne %0, #2 \n"
  62. "3:\n"
  63. ".section .fixup, \"ax\"\n"
  64. "4: mov %0, #1 \n"
  65. " b 3b \n"
  66. ".previous\n"
  67. ".section __ex_table, \"a\"\n"
  68. " .long 1b, 4b \n"
  69. " .long 2b, 4b \n"
  70. ".previous\n"
  71. : "=r"(err), "=r"(replaced)
  72. : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
  73. : "memory");
  74. if (!err && (replaced == old))
  75. flush_icache_range(pc, pc + INSN_SIZE);
  76. return err;
  77. }
  78. int ftrace_update_ftrace_func(ftrace_func_t func)
  79. {
  80. int ret;
  81. unsigned long pc, old;
  82. unsigned char *new;
  83. pc = (unsigned long)&ftrace_call;
  84. pc += INSN_SIZE;
  85. memcpy(&old, &ftrace_call, INSN_SIZE);
  86. new = ftrace_call_replace(pc, (unsigned long)func);
  87. ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
  88. return ret;
  89. }
  90. int ftrace_mcount_set(unsigned long *data)
  91. {
  92. unsigned long pc, old;
  93. unsigned long *addr = data;
  94. unsigned char *new;
  95. pc = (unsigned long)&mcount_call;
  96. pc += INSN_SIZE;
  97. memcpy(&old, &mcount_call, INSN_SIZE);
  98. new = ftrace_call_replace(pc, *addr);
  99. *addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
  100. return 0;
  101. }
  102. /* run from kstop_machine */
  103. int __init ftrace_dyn_arch_init(void *data)
  104. {
  105. ftrace_mcount_set(data);
  106. return 0;
  107. }