trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
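/*
 * A quick usage sketch (assuming debugfs is mounted at
 * /sys/kernel/debug, the conventional location):
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_trace
 *
 * stack_max_size reports the deepest kernel stack seen so far;
 * stack_trace shows the call chain that produced it.
 */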
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/setup.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
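/*
 * Measure how much of the current thread's stack is in use.  On a new
 * maximum, save a fresh stack trace and record, for each saved entry,
 * the stack depth at which that frame's return address was found.
 */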
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
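/*
 * ftrace callback, invoked on every traced function entry.  The per-cpu
 * trace_active counter keeps the tracer from recursing into itself.
 */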
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};
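/* Report the recorded maximum stack usage via debugfs. */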
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%lu\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
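/*
 * Let user space overwrite the recorded maximum (typically writing 0
 * to re-arm the tracer for a new measurement).
 */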
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
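/*
 * seq_file iterator for the stack_trace file.  t_start() pins the
 * snapshot by bumping trace_active and taking max_stack_lock with
 * interrupts off; t_stop() releases them again.
 */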
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
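/*
 * Emit one report line: entry index, total stack still in use at this
 * frame (Depth), the size of this frame alone (Size), and its symbol.
 */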
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open		= stack_trace_filter_open,
	.read		= seq_read,
	.write		= ftrace_filter_write,
	.llseek		= ftrace_regex_lseek,
	.release	= ftrace_regex_release,
};
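/*
 * Handler for the kernel.stack_tracer_enabled sysctl: register or
 * unregister the ftrace callback only when the value actually flips.
 */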
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
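/*
 * "stacktrace" on the kernel command line turns the tracer on at boot;
 * "stacktrace_filter=<list>" additionally restricts it to the listed
 * functions once ftrace is up.
 */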
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
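/*
 * Create the debugfs control files under the tracing directory and
 * honor any boot-time enable/filter requests.
 */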
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);