trace_sysprof.c

/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include <asm/stacktrace.h>

#include "trace.h"
static struct trace_array *sysprof_trace;
static int __read_mostly tracer_enabled;

/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
static const unsigned int sample_max_depth = 512;

static DEFINE_MUTEX(sample_timer_lock);
/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

struct stack_frame {
	const void __user *next_fp;
	unsigned long return_address;
};
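
/*
 * Safely read one stack frame from user space. This runs from hard
 * interrupt (hrtimer) context, so page faults are disabled around the
 * copy; a faulting access makes the copy fail instead of sleeping.
 */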
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}
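
/*
 * State shared with the dump_trace() callbacks below while walking the
 * kernel stack: the trace arrays to log into and the current depth.
 */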
struct backtrace_info {
	struct trace_array_cpu *data;
	struct trace_array *tr;
	int pos;
};

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct backtrace_info *info = data;

	if (info->pos < sample_max_depth && reliable) {
		__trace_special(info->tr, info->data, 1, addr, 0);

		info->pos++;
	}
}

static const struct stacktrace_ops backtrace_ops = {
	.warning = backtrace_warning,
	.warning_symbol = backtrace_warning_symbol,
	.stack = backtrace_stack,
	.address = backtrace_address,
};
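
/*
 * Record the kernel-mode part of the backtrace via dump_trace(), then
 * hand back a pt_regs pointing at the user-mode register state saved
 * at the top of the task's kernel stack, so the caller can continue
 * with the user stack.
 */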
static struct pt_regs *
trace_kernel(struct pt_regs *regs, struct trace_array *tr,
	     struct trace_array_cpu *data)
{
	struct backtrace_info info;
	unsigned long bp;
	char *user_stack;
	char *stack;

	info.tr = tr;
	info.data = data;
	info.pos = 1;

	__trace_special(info.tr, info.data, 1, regs->ip, 0);

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = regs->bp;
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);

	/* Now trace the user stack */
	user_stack = ((char *)current->thread.sp0 - sizeof(struct pt_regs));

	return (struct pt_regs *)user_stack;
}
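
/*
 * Called from the per-CPU sampling timer: log a sample for the current
 * task, walk the kernel stack first if we interrupted kernel mode,
 * then follow the user-space frame-pointer chain for at most
 * sample_max_depth frames.
 */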
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	__trace_special(tr, data, 0, 0, current->pid);

	if (!is_user)
		regs = trace_kernel(regs, tr, data);

	fp = (void __user *)regs->bp;

	__trace_special(tr, data, 2, regs->ip, 0);

	for (i = 0; i < sample_max_depth; i++) {
		frame.next_fp = NULL;
		frame.return_address = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;

		__trace_special(tr, data, 2, frame.return_address,
				(unsigned long)fp);
		fp = frame.next_fp;
	}

	__trace_special(tr, data, 3, current->pid, i);

	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		__trace_special(tr, data, -1, -1, -1);
}
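
/*
 * hrtimer callback: take one sample and re-arm for the next period.
 */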
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here */
	timer_notify(get_irq_regs(), smp_processor_id());

	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}
static void start_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;
	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
}
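
/*
 * Start one timer per online CPU. hrtimer_start() arms the timer on
 * the CPU it runs on, so temporarily migrate to each CPU in turn and
 * restore the original affinity afterwards.
 */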
static void start_stack_timers(void)
{
	cpumask_t saved_mask = current->cpus_allowed;
	int cpu;

	for_each_online_cpu(cpu) {
		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
		start_stack_timer(cpu);
	}
	set_cpus_allowed_ptr(current, &saved_mask);
}
static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}
static void stack_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static void start_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stack_reset(tr);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
}

static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	tracer_enabled = 0;
	mutex_unlock(&sample_timer_lock);
}
static void stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	if (tr->ctrl)
		start_stack_trace(tr);
}

static void stack_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_stack_trace(tr);
}

static void stack_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_stack_trace(tr);
	else
		stop_stack_trace(tr);
}

static struct tracer stack_trace __read_mostly =
{
	.name = "sysprof",
	.init = stack_trace_init,
	.reset = stack_trace_reset,
	.ctrl_update = stack_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);
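
/*
 * debugfs knob for the sample period: reads report the current period
 * in usecs, writes take a value in usecs and restart the per-CPU
 * timers with it.
 */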
#define MAX_LONG_DIGITS 22

static ssize_t
sysprof_sample_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	int r;

	r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
sysprof_sample_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	unsigned long val;

	if (cnt > MAX_LONG_DIGITS-1)
		cnt = MAX_LONG_DIGITS-1;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);
	/*
	 * Enforce a minimum sample period of 100 usecs:
	 */
	if (val < 100)
		val = 100;

	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	sample_period = val * 1000;
	start_stack_timers();
	mutex_unlock(&sample_timer_lock);

	return cnt;
}

static const struct file_operations sysprof_sample_fops = {
	.read = sysprof_sample_read,
	.write = sysprof_sample_write,
};
void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("sysprof_sample_period", 0644,
			d_tracer, NULL, &sysprof_sample_fops);
	if (entry)
		return;
	pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n");
}