/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
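/*
 * Each LOCKDEP_STATE(x) line in lockdep_states.h contributes four
 * enumerators above. Assuming the states HARDIRQ, SOFTIRQ and
 * RECLAIM_FS (the set at the time of writing), the enum expands
 * roughly to:
 *
 *	enum lock_usage_bit {
 *		LOCK_USED_IN_HARDIRQ,
 *		LOCK_USED_IN_HARDIRQ_READ,
 *		LOCK_ENABLED_HARDIRQ,
 *		LOCK_ENABLED_HARDIRQ_READ,
 *		LOCK_USED_IN_SOFTIRQ,
 *		...
 *		LOCK_USED,
 *		LOCK_USAGE_STATES
 *	};
 */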
/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
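/*
 * These masks mirror the lock_usage_bit enum one-to-one, turning each
 * usage state into a single-bit mask so that a lock class's entire
 * usage history fits into one word, e.g.:
 *
 *	LOCKF_USED_IN_HARDIRQ == (1 << LOCK_USED_IN_HARDIRQ)
 */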
#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
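/*
 * A sketch of how these composite masks get queried (illustrative
 * only; the real checks live in mark_lock() and friends in
 * kernel/lockdep.c): a class that was ever acquired in IRQ context
 * and was also ever held with IRQs enabled is a potential
 * IRQ-inversion deadlock:
 *
 *	if ((class->usage_mask & LOCKF_USED_IN_IRQ) &&
 *	    (class->usage_mask & LOCKF_ENABLED_IRQ))
 *		report_possible_irq_inversion(class);
 *
 * report_possible_irq_inversion() is a made-up name for illustration.
 */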
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to every currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
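/*
 * MAX_LOCKDEP_CHAIN_HLOCKS sizes the flat array that stores the held
 * locks of all chains back to back; the *5 factor budgets for an
 * average chain depth of five held locks (an empirical sizing choice,
 * not a hard per-chain limit).
 */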
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];
#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
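/*
 * LOCK_USAGE_CHARS works out to two characters per state (one for the
 * write variant, one for the _READ variant) plus the terminating
 * '\0': with n states, LOCK_USAGE_STATES is 4*n + 1, so
 * 1 + (4*n + 1)/2 yields 2*n + 1 with integer division.
 */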
extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu, as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
#define __debug_atomic_inc(ptr)				\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)		{		\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_inc(lockdep_stats.ptr);		\
}

#define debug_atomic_dec(ptr)		{		\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_dec(lockdep_stats.ptr);		\
}
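/*
 * Typical fast-path use (an example call, assuming IRQs are already
 * disabled at the call site, as they are on the lockdep paths):
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *
 * The WARN_ON_ONCE(!irqs_disabled()) exists because __this_cpu_inc()
 * is not IRQ-safe: an interrupt between the read and the write of the
 * counter could lose an update. With IRQs disabled, the plain
 * read-modify-write is safe and no atomic op is needed.
 */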
#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
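/*
 * debug_atomic_read() is a GCC statement expression that folds the
 * per-cpu counters into one total, meant for the slow path (e.g. the
 * /proc/lockdep_stats output):
 *
 *	unsigned long long hits = debug_atomic_read(chain_lookup_hits);
 *
 * The sum is not a consistent snapshot (counters may change while we
 * iterate), which is fine for statistics.
 */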
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif