/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

#include "lockdep_internals.h"

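/*
 * Iterator for /proc/lockdep: the seq_file walks the global
 * all_lock_classes list, with m->private tracking the current
 * lock_class and l_show() emitting one line per class.
 */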
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_class *class = v;

        (*pos)++;

        if (class->lock_entry.next != &all_lock_classes)
                class = list_entry(class->lock_entry.next, struct lock_class,
                                   lock_entry);
        else
                class = NULL;
        m->private = class;

        return class;
}

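/*
 * l_start() resumes from whatever class m->private currently points at;
 * the "all lock classes:" header line is only printed while the iterator
 * is still at the head of all_lock_classes.
 */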
static void *l_start(struct seq_file *m, loff_t *pos)
{
        struct lock_class *class = m->private;

        if (&class->lock_entry == all_lock_classes.next)
                seq_printf(m, "all lock classes:\n");

        return class;
}

static void l_stop(struct seq_file *m, void *v)
{
}

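/*
 * The two counters below walk a class's dependency graph recursively.
 * The count includes the class itself (ret starts at 1), and a class
 * reachable via more than one path is counted once per path, since no
 * visited-marking is done.
 */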
static unsigned long count_forward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_after, entry)
                ret += count_forward_deps(entry->class);

        return ret;
}

static unsigned long count_backward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_before, entry)
                ret += count_backward_deps(entry->class);

        return ret;
}

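/*
 * One /proc/lockdep line per class, roughly:
 *
 *   <key> [OPS:<ops>] FD:<fwd-deps> BD:<back-deps> <usage-chars> : <name>[#<version>][/<subclass>]
 *
 * (the OPS field only appears with CONFIG_DEBUG_LOCKDEP.)
 */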
static int l_show(struct seq_file *m, void *v)
{
        unsigned long nr_forward_deps, nr_backward_deps;
        struct lock_class *class = m->private;
        char str[128], c1, c2, c3, c4;
        const char *name;

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
#endif
        nr_forward_deps = count_forward_deps(class);
        seq_printf(m, " FD:%5ld", nr_forward_deps);

        nr_backward_deps = count_backward_deps(class);
        seq_printf(m, " BD:%5ld", nr_backward_deps);

        get_usage_chars(class, &c1, &c2, &c3, &c4);
        seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);

        name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
                seq_printf(m, ": %s", name);
        } else {
                seq_printf(m, ": %s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
        seq_puts(m, "\n");

        return 0;
}

static struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};

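/*
 * Opening /proc/lockdep primes m->private with the first entry of
 * all_lock_classes (or NULL if no classes have been registered yet),
 * which is where l_start() picks up.
 */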
static int lockdep_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &lockdep_ops);
        if (!res) {
                struct seq_file *m = file->private_data;

                if (!list_empty(&all_lock_classes))
                        m->private = list_entry(all_lock_classes.next,
                                        struct lock_class, lock_entry);
                else
                        m->private = NULL;
        }
        return res;
}

static struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

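/*
 * Extra lockdep-internal counters for /proc/lockdep_stats; the whole
 * body is compiled out unless CONFIG_DEBUG_LOCKDEP is enabled.
 */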
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
                     hi2 = debug_atomic_read(&hardirqs_off_events),
                     hr1 = debug_atomic_read(&redundant_hardirqs_on),
                     hr2 = debug_atomic_read(&redundant_hardirqs_off),
                     si1 = debug_atomic_read(&softirqs_on_events),
                     si2 = debug_atomic_read(&softirqs_off_events),
                     sr1 = debug_atomic_read(&redundant_softirqs_on),
                     sr2 = debug_atomic_read(&redundant_softirqs_off);

        seq_printf(m, " chain lookup misses:           %11u\n",
                debug_atomic_read(&chain_lookup_misses));
        seq_printf(m, " chain lookup hits:             %11u\n",
                debug_atomic_read(&chain_lookup_hits));
        seq_printf(m, " cyclic checks:                 %11u\n",
                debug_atomic_read(&nr_cyclic_checks));
        seq_printf(m, " cyclic-check recursions:       %11u\n",
                debug_atomic_read(&nr_cyclic_check_recursions));
        seq_printf(m, " find-mask forwards checks:     %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask forwards recursions: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_recursions));
        seq_printf(m, " find-mask backwards checks:    %11u\n",
                debug_atomic_read(&nr_find_usage_backwards_checks));
        seq_printf(m, " find-mask backwards recursions:%11u\n",
                debug_atomic_read(&nr_find_usage_backwards_recursions));

        seq_printf(m, " hardirq on events:             %11u\n", hi1);
        seq_printf(m, " hardirq off events:            %11u\n", hi2);
        seq_printf(m, " redundant hardirq ons:         %11u\n", hr1);
        seq_printf(m, " redundant hardirq offs:        %11u\n", hr2);
        seq_printf(m, " softirq on events:             %11u\n", si1);
        seq_printf(m, " softirq off events:            %11u\n", si2);
        seq_printf(m, " redundant softirq ons:         %11u\n", sr1);
        seq_printf(m, " redundant softirq offs:        %11u\n", sr2);
#endif
}

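/*
 * /proc/lockdep_stats: a single pass over all_lock_classes classifies
 * every class by its usage_mask bits (hardirq/softirq/irq safe and
 * unsafe, read variants, unused, uncategorized) and sums the indirect
 * forward dependencies, then prints the totals alongside the global
 * lockdep limits.
 */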
static int lockdep_stats_show(struct seq_file *m, void *v)
{
        struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0, factor = 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                        nr_hardirq_read_unsafe++;

                sum_forward_deps += count_forward_deps(class);
        }
#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
#endif
        seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
        seq_printf(m, " direct dependencies:           %11lu [max: %lu]\n",
                        nr_list_entries, MAX_LOCKDEP_ENTRIES);
        seq_printf(m, " indirect dependencies:         %11lu\n",
                        sum_forward_deps);

        /*
         * Total number of dependencies:
         *
         * All irq-safe locks may nest inside irq-unsafe locks,
         * plus all the other known dependencies:
         */
        seq_printf(m, " all direct dependencies:       %11lu\n",
                        nr_irq_unsafe * nr_irq_safe +
                        nr_hardirq_unsafe * nr_hardirq_safe +
                        nr_list_entries);

        /*
         * Estimated factor between direct and indirect
         * dependencies:
         */
        if (nr_list_entries)
                factor = sum_forward_deps / nr_list_entries;

        seq_printf(m, " dependency chains:             %11lu [max: %lu]\n",
                        nr_lock_chains, MAX_LOCKDEP_CHAINS);

#ifdef CONFIG_TRACE_IRQFLAGS
        seq_printf(m, " in-hardirq chains:             %11u\n",
                        nr_hardirq_chains);
        seq_printf(m, " in-softirq chains:             %11u\n",
                        nr_softirq_chains);
#endif
        seq_printf(m, " in-process chains:             %11u\n",
                        nr_process_chains);
        seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n",
                        nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
        seq_printf(m, " combined max dependencies:     %11u\n",
                        (nr_hardirq_chains + 1) *
                        (nr_softirq_chains + 1) *
                        (nr_process_chains + 1)
        );
        seq_printf(m, " hardirq-safe locks:            %11lu\n",
                        nr_hardirq_safe);
        seq_printf(m, " hardirq-unsafe locks:          %11lu\n",
                        nr_hardirq_unsafe);
        seq_printf(m, " softirq-safe locks:            %11lu\n",
                        nr_softirq_safe);
        seq_printf(m, " softirq-unsafe locks:          %11lu\n",
                        nr_softirq_unsafe);
        seq_printf(m, " irq-safe locks:                %11lu\n",
                        nr_irq_safe);
        seq_printf(m, " irq-unsafe locks:              %11lu\n",
                        nr_irq_unsafe);
        seq_printf(m, " hardirq-read-safe locks:       %11lu\n",
                        nr_hardirq_read_safe);
        seq_printf(m, " hardirq-read-unsafe locks:     %11lu\n",
                        nr_hardirq_read_unsafe);
        seq_printf(m, " softirq-read-safe locks:       %11lu\n",
                        nr_softirq_read_safe);
        seq_printf(m, " softirq-read-unsafe locks:     %11lu\n",
                        nr_softirq_read_unsafe);
        seq_printf(m, " irq-read-safe locks:           %11lu\n",
                        nr_irq_read_safe);
        seq_printf(m, " irq-read-unsafe locks:         %11lu\n",
                        nr_irq_read_unsafe);
        seq_printf(m, " uncategorized locks:           %11lu\n",
                        nr_uncategorized);
        seq_printf(m, " unused locks:                  %11lu\n",
                        nr_unused);
        seq_printf(m, " max locking depth:             %11u\n",
                        max_lockdep_depth);
        seq_printf(m, " max recursion depth:           %11u\n",
                        max_recursion_depth);
        lockdep_stats_debug_show(m);
        seq_printf(m, " debug_locks:                   %11u\n",
                        debug_locks);

        return 0;
}

static int lockdep_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lockdep_stats_show, NULL);
}

static struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

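/*
 * Create the two owner-readable (S_IRUSR) proc files at boot. If
 * create_proc_entry() fails, the corresponding file is silently
 * skipped.
 */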
static int __init lockdep_proc_init(void)
{
        struct proc_dir_entry *entry;

        entry = create_proc_entry("lockdep", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_operations;

        entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_stats_operations;

        return 0;
}

__initcall(lockdep_proc_init);