/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/alternative.h>
  18. #define CALL_BACK 5
  19. /* Long is fine, even if it is only 4 bytes ;-) */
  20. static long *ftrace_nop;
  21. struct ftrace_record {
  22. struct dyn_ftrace rec;
  23. int failed;
  24. } __attribute__((packed));
  25. struct ftrace_page {
  26. struct ftrace_page *next;
  27. int index;
  28. struct ftrace_record records[];
  29. } __attribute__((packed));
  30. #define ENTRIES_PER_PAGE \
  31. ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))
  32. /* estimate from running different kernels */
  33. #define NR_TO_INIT 10000
  34. #define MCOUNT_ADDR ((long)(&mcount))
  35. union ftrace_code_union {
  36. char code[5];
  37. struct {
  38. char e8;
  39. int offset;
  40. } __attribute__((packed));
  41. };
  42. static struct ftrace_page *ftrace_pages_start;
  43. static struct ftrace_page *ftrace_pages;
  44. notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
  45. {
  46. struct ftrace_record *rec;
  47. unsigned long save;
  48. ip -= CALL_BACK;
  49. save = *(long *)ip;
  50. /* If this was already converted, skip it */
  51. if (save == *ftrace_nop)
  52. return NULL;
  53. if (ftrace_pages->index == ENTRIES_PER_PAGE) {
  54. if (!ftrace_pages->next)
  55. return NULL;
  56. ftrace_pages = ftrace_pages->next;
  57. }
  58. rec = &ftrace_pages->records[ftrace_pages->index++];
  59. return &rec->rec;
  60. }
  61. static int notrace
  62. ftrace_modify_code(unsigned long ip, unsigned char *old_code,
  63. unsigned char *new_code)
  64. {
  65. unsigned replaced;
  66. unsigned old = *(unsigned *)old_code; /* 4 bytes */
  67. unsigned new = *(unsigned *)new_code; /* 4 bytes */
  68. unsigned char newch = new_code[4];
  69. int faulted = 0;
  70. /*
  71. * Note: Due to modules and __init, code can
  72. * disappear and change, we need to protect against faulting
  73. * as well as code changing.
  74. *
  75. * No real locking needed, this code is run through
  76. * kstop_machine.
  77. */
  78. asm volatile (
  79. "1: lock\n"
  80. " cmpxchg %3, (%2)\n"
  81. " jnz 2f\n"
  82. " movb %b4, 4(%2)\n"
  83. "2:\n"
  84. ".section .fixup, \"ax\"\n"
  85. " movl $1, %0\n"
  86. "3: jmp 2b\n"
  87. ".previous\n"
  88. _ASM_EXTABLE(1b, 3b)
  89. : "=r"(faulted), "=a"(replaced)
  90. : "r"(ip), "r"(new), "r"(newch),
  91. "0"(faulted), "a"(old)
  92. : "memory");
  93. sync_core();
  94. if (replaced != old && replaced != new)
  95. faulted = 2;
  96. return faulted;
  97. }
  98. static int notrace ftrace_calc_offset(long ip)
  99. {
  100. return (int)(MCOUNT_ADDR - ip);
  101. }
  102. notrace void ftrace_code_disable(struct dyn_ftrace *rec)
  103. {
  104. unsigned long ip;
  105. union ftrace_code_union save;
  106. struct ftrace_record *r =
  107. container_of(rec, struct ftrace_record, rec);
  108. ip = rec->ip;
  109. save.e8 = 0xe8;
  110. save.offset = ftrace_calc_offset(ip);
  111. /* move the IP back to the start of the call */
  112. ip -= CALL_BACK;
  113. r->failed = ftrace_modify_code(ip, save.code, (char *)ftrace_nop);
  114. }
  115. static void notrace ftrace_replace_code(int saved)
  116. {
  117. unsigned char *new = NULL, *old = NULL;
  118. struct ftrace_record *rec;
  119. struct ftrace_page *pg;
  120. unsigned long ip;
  121. int i;
  122. if (saved)
  123. old = (char *)ftrace_nop;
  124. else
  125. new = (char *)ftrace_nop;
  126. for (pg = ftrace_pages_start; pg; pg = pg->next) {
  127. for (i = 0; i < pg->index; i++) {
  128. union ftrace_code_union calc;
  129. rec = &pg->records[i];
  130. /* don't modify code that has already faulted */
  131. if (rec->failed)
  132. continue;
  133. ip = rec->rec.ip;
  134. calc.e8 = 0xe8;
  135. calc.offset = ftrace_calc_offset(ip);
  136. if (saved)
  137. new = calc.code;
  138. else
  139. old = calc.code;
  140. ip -= CALL_BACK;
  141. rec->failed = ftrace_modify_code(ip, old, new);
  142. }
  143. }
  144. }
  145. notrace void ftrace_startup_code(void)
  146. {
  147. ftrace_replace_code(1);
  148. }
  149. notrace void ftrace_shutdown_code(void)
  150. {
  151. ftrace_replace_code(0);
  152. }
  153. notrace void ftrace_shutdown_replenish(void)
  154. {
  155. if (ftrace_pages->next)
  156. return;
  157. /* allocate another page */
  158. ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
  159. }
  160. notrace int __init ftrace_shutdown_arch_init(void)
  161. {
  162. const unsigned char *const *noptable = find_nop_table();
  163. struct ftrace_page *pg;
  164. int cnt;
  165. int i;
  166. ftrace_nop = (unsigned long *)noptable[CALL_BACK];
  167. /* allocate a few pages */
  168. ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
  169. if (!ftrace_pages_start)
  170. return -1;
  171. /*
  172. * Allocate a few more pages.
  173. *
  174. * TODO: have some parser search vmlinux before
  175. * final linking to find all calls to ftrace.
  176. * Then we can:
  177. * a) know how many pages to allocate.
  178. * and/or
  179. * b) set up the table then.
  180. *
  181. * The dynamic code is still necessary for
  182. * modules.
  183. */
  184. pg = ftrace_pages = ftrace_pages_start;
  185. cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
  186. for (i = 0; i < cnt; i++) {
  187. pg->next = (void *)get_zeroed_page(GFP_KERNEL);
  188. /* If we fail, we'll try later anyway */
  189. if (!pg->next)
  190. break;
  191. pg = pg->next;
  192. }
  193. return 0;
  194. }