/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
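/*
 * For illustration: with the HARDIRQ state from lockdep_states.h,
 * the LOCKDEP_STATE() expansion above yields
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *
 * and likewise for each other state listed there (e.g. SOFTIRQ).
 */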
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
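/*
 * Each LOCKF_* flag is simply the corresponding usage bit shifted
 * into a mask, so for instance:
 *
 *	LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ)
 */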
#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
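/*
 * Example of what the comment above describes: if a task acquires B
 * while already holding A, the dependency A -> B is added to A's
 * dependency list (if it is not there yet); a later attempt to take
 * A while holding B can then be flagged, since it would close the
 * cycle A -> B -> A.
 */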
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
extern atomic_t chain_lookup_hits;
extern atomic_t chain_lookup_misses;
extern atomic_t hardirqs_on_events;
extern atomic_t hardirqs_off_events;
extern atomic_t redundant_hardirqs_on;
extern atomic_t redundant_hardirqs_off;
extern atomic_t softirqs_on_events;
extern atomic_t softirqs_off_events;
extern atomic_t redundant_softirqs_on;
extern atomic_t redundant_softirqs_off;
extern atomic_t nr_unused_locks;
extern atomic_t nr_cyclic_checks;
extern atomic_t nr_cyclic_check_recursions;
extern atomic_t nr_find_usage_forwards_checks;
extern atomic_t nr_find_usage_forwards_recursions;
extern atomic_t nr_find_usage_backwards_checks;
extern atomic_t nr_find_usage_backwards_recursions;
# define debug_atomic_inc(ptr)		atomic_inc(ptr)
# define debug_atomic_dec(ptr)		atomic_dec(ptr)
# define debug_atomic_read(ptr)		atomic_read(ptr)
#else
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif
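/*
 * Typical use (as in lockdep.c): bump a statistic unconditionally in
 * the code and let the macro compile away to nothing when
 * CONFIG_DEBUG_LOCKDEP is not set, e.g.:
 *
 *	debug_atomic_inc(&chain_lookup_hits);
 */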
/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previously held lock, if there is a circular dependency
 * between them.
 */
#define MAX_CIRCULAR_QUE_SIZE		4096UL

struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
	unsigned int front, rear;
};

#define LOCK_ACCESSED		1UL
#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) % MAX_CIRCULAR_QUE_SIZE;
	return 0;
}

static inline int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) % MAX_CIRCULAR_QUE_SIZE;
}
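/*
 * A minimal usage sketch (illustration only; the real BFS walk lives
 * in lockdep.c): drain the queue in FIFO order, enqueueing each
 * entry's not-yet-accessed children as they are discovered:
 *
 *	struct circular_queue cq;
 *	unsigned long elem;
 *
 *	__cq_init(&cq);
 *	if (__cq_enqueue(&cq, (unsigned long)source_entry))
 *		return -1;		(queue full)
 *	while (!__cq_empty(&cq)) {
 *		__cq_dequeue(&cq, &elem);
 *		... visit the lock_list behind 'elem', enqueue its
 *		    unvisited children ...
 *	}
 *
 * 'source_entry' is a stand-in name for this sketch, not something
 * this header defines.
 */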
static inline void mark_lock_accessed(struct lock_list *lock,
				      struct lock_list *parent)
{
	lock->parent = (void *)parent + LOCK_ACCESSED;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	return (unsigned long)lock->parent & LOCK_ACCESSED;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return (struct lock_list *)
		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
}
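/*
 * Note on the encoding above: the BFS parent pointer does double
 * duty. struct lock_list objects are at least word-aligned, so bit 0
 * of the pointer is always free; mark_lock_accessed() sets it to
 * flag the entry as visited, lock_accessed() tests it, and
 * get_lock_parent() masks it back off to recover the real pointer.
 */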
static inline unsigned long get_lock_depth(struct lock_list *child)
{
	unsigned long depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}