trace_stack.c

/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"
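
/*
 * The stack tracer: records the deepest kernel stack usage seen since
 * boot (or since the last reset) and the call chain that produced it.
 * The results are exported through two debugfs files in the tracing
 * directory. Example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_trace
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size	(reset)
 */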

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries = STACK_TRACE_ENTRIES,
	.entries = stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
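
/*
 * check_stack() measures how deep the current task's stack is by
 * masking the address of a local variable against THREAD_SIZE - 1.
 * On a new maximum it saves a stack trace, then walks the raw stack
 * words to compute, for each saved return address, how deep that
 * frame sits (stack_dump_index). Runs with IRQs off under
 * max_stack_lock, and only on a new max.
 */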
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some of the entries may
	 * for some reason be missing from the stack, so we have to
	 * account for that. If they are all there, this loop will
	 * only run once. This code only runs on a new max, so it is
	 * far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				/* Start the search from here */
				start = p + 1;
			}
		}

		i++;
	}

 out:
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);
}
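
/*
 * stack_trace_call() is the ftrace callback, invoked on entry to every
 * traced function. The per-cpu trace_active counter keeps check_stack()
 * from recursing into itself through the functions it calls, and if a
 * reschedule was already pending, preemption is re-enabled without a
 * resched check to avoid recursing into schedule().
 */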
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};
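
/*
 * stack_max_size is exposed read/write through debugfs: reading reports
 * the current watermark in bytes, and writing (typically 0) sets it,
 * under the same lock check_stack() takes so an update cannot race
 * with a reset.
 */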
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%lu\n", *ptr);
	if (r >= sizeof(buf))
		r = sizeof(buf) - 1;

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);

	return count;
}

static struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
};
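
/*
 * seq_file iterator for the stack_trace file. The current entry index
 * is carried in m->private as a cast long; -1 selects the header row
 * in t_show(). t_start() takes max_stack_lock with IRQs off so the
 * snapshot cannot change while it is being printed.
 */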
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	long i = (long)m->private;

	(*pos)++;
	i++;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return NULL;

	m->private = (void *)i;

	return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = &m->private;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "%s\n", str);
#else
	return seq_printf(m, "%p\n", (void *)addr);
#endif
}

static int t_show(struct seq_file *m, void *v)
{
	long i = *(long *)v;
	int size;

	if (i < 0) {
		seq_printf(m, "        Depth   Size      Location"
			   "    (%d entries)\n"
			   "        -----   ----      --------\n",
			   max_stack_trace.nr_entries);
		return 0;
	}

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i + 1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &stack_trace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)-1;
	}

	return ret;
}

static struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
};
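
/*
 * Create the two debugfs files in the tracing directory and hook
 * stack_trace_call() into the function tracer. In this version the
 * callback is registered unconditionally at boot.
 */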
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);