/* arch/arm/kernel/ftrace.c */
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched lazily with NOP till they are
 * enabled. All code mutation routines here take effect atomically.
 */
#include <linux/errno.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
  16. #define PC_OFFSET 8
  17. #define BL_OPCODE 0xeb000000
  18. #define BL_OFFSET_MASK 0x00ffffff
  19. static unsigned long bl_insn;
  20. static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
  21. unsigned char *ftrace_nop_replace(void)
  22. {
  23. return (char *)&NOP;
  24. }
  25. /* construct a branch (BL) instruction to addr */
  26. unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
  27. {
  28. long offset;
  29. offset = (long)addr - (long)(pc + PC_OFFSET);
  30. if (unlikely(offset < -33554432 || offset > 33554428)) {
  31. /* Can't generate branches that far (from ARM ARM). Ftrace
  32. * doesn't generate branches outside of kernel text.
  33. */
  34. WARN_ON_ONCE(1);
  35. return NULL;
  36. }
  37. offset = (offset >> 2) & BL_OFFSET_MASK;
  38. bl_insn = BL_OPCODE | offset;
  39. return (unsigned char *)&bl_insn;
  40. }
/*
 * Atomically replace the instruction word at @pc: load the current
 * word, and only if it equals *@old_code, store *@new_code over it
 * (via the conditional streq).
 *
 * Return values:
 *   0 - the expected old instruction was found and replaced (or the
 *       new instruction was already in place);
 *   1 - the access to @pc faulted (reached via the __ex_table fixup);
 *   2 - the word at @pc matched neither @old_code nor @new_code.
 *
 * NOTE(review): @err is declared unsigned long but returned as int;
 * the values used (0/1/2) make this harmless, but the types look
 * inconsistent -- confirm against the rest of the ftrace code.
 */
int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
		       unsigned char *new_code)
{
	unsigned long err = 0, replaced = 0, old, new;

	/* Read the expected and replacement instruction words. */
	old = *(unsigned long *)old_code;
	new = *(unsigned long *)new_code;

	/*
	 * Labels 1 and 2 mark the load and the conditional store; both
	 * get exception-table entries pointing at fixup label 4, which
	 * reports err = 1 on a fault.  cmpne/movne after the store set
	 * err = 2 when the loaded word was neither @old nor @new.
	 */
	__asm__ __volatile__ (
	"1: ldr %1, [%2] \n"
	" cmp %1, %4 \n"
	"2: streq %3, [%2] \n"
	" cmpne %1, %3 \n"
	" movne %0, #2 \n"
	"3:\n"
	".section .fixup, \"ax\"\n"
	"4: mov %0, #1 \n"
	" b 3b \n"
	".previous\n"
	".section __ex_table, \"a\"\n"
	" .long 1b, 4b \n"
	" .long 2b, 4b \n"
	".previous\n"
	: "=r"(err), "=r"(replaced)
	: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
	: "memory");

	/* Only flush the I-cache when the store actually happened. */
	if (!err && (replaced == old))
		flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return err;
}
  69. int ftrace_update_ftrace_func(ftrace_func_t func)
  70. {
  71. int ret;
  72. unsigned long pc, old;
  73. unsigned char *new;
  74. pc = (unsigned long)&ftrace_call;
  75. memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
  76. new = ftrace_call_replace(pc, (unsigned long)func);
  77. ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
  78. return ret;
  79. }
/* run from ftrace_init with irqs disabled */
int __init ftrace_dyn_arch_init(void *data)
{
	/*
	 * Hand @data to the arch-independent ftrace core.
	 * NOTE(review): ftrace_mcount_set() is defined outside this
	 * file; presumably @data is ftrace's mcount-address slot --
	 * confirm against the ftrace_init() caller.
	 */
	ftrace_mcount_set(data);
	return 0;
}