spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/bug.h>
#ifdef CONFIG_MACH_ATLANTICLTE_ATT
#include <mach/msm_watchdog_v2.h>
#endif

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__raw_spin_lock_init);
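
/*
 * Usage sketch (illustrative, not part of this file): with
 * CONFIG_DEBUG_SPINLOCK enabled, the raw_spin_lock_init() macro from
 * <linux/spinlock.h> routes here, e.g.:
 *
 *        static raw_spinlock_t my_lock;        (hypothetical lock)
 *        raw_spin_lock_init(&my_lock);         (sets magic, owner and owner_cpu)
 */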

void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
EXPORT_SYMBOL(__rwlock_init);
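
/*
 * Likewise (illustrative note): the generic rwlock_init() macro expands
 * to a call to __rwlock_init() above when CONFIG_DEBUG_SPINLOCK is set.
 */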

#ifdef CONFIG_SEC_DEBUG_SPINLOCK_PANIC
#define DBG_HRT_MAX 10
raw_spinlock_t debug_hrtimer_spinlock[DBG_HRT_MAX];
#endif
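
/*
 * spin_dump() below reports a suspected spinlock bug, then escalates
 * according to configuration: on ATLANTICLTE_ATT boards it forces a
 * watchdog bite via msm_cause_bite(), under
 * CONFIG_SEC_DEBUG_SPINLOCK_PANIC it panics outright, and otherwise it
 * only dumps the stack.
 */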

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = NULL;

        if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                owner = lock->owner;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
               msg, raw_smp_processor_id(),
               current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
               ".owner_cpu: %d\n",
               lock, lock->magic,
               owner ? owner->comm : "<none>",
               owner ? task_pid_nr(owner) : -1,
               lock->owner_cpu);
#ifdef CONFIG_MACH_ATLANTICLTE_ATT
        msm_cause_bite();
        dump_stack();
#else
#ifdef CONFIG_SEC_DEBUG_SPINLOCK_PANIC
        panic("spinlock bug");
#else
        dump_stack();
#endif
#endif
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                    lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                    lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}
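
/*
 * Example (hypothetical) of the bug the "recursion" check above
 * catches:
 *
 *        raw_spin_lock(&l);
 *        raw_spin_lock(&l);        -> SPIN_BUG_ON(lock->owner == current)
 *
 * Without the debug check this would silently deadlock on the arch
 * lock.
 */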

#if 0
static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = (loops_per_jiffy * HZ);

        for (i = 0; i < loops; i++) {
                if (arch_spin_trylock(&lock->raw_lock))
                        return;
                __delay(1);
        }
        /* lockup suspected: */
        spin_dump(lock, "lockup");
#ifdef CONFIG_SMP
        trigger_all_cpu_backtrace();
#endif

        /*
         * The trylock above was causing a livelock. Give the lower level arch
         * specific lock code a chance to acquire the lock. We have already
         * printed a warning/backtrace at this point. The non-debug arch
         * specific code might actually succeed in acquiring the lock. If it is
         * not successful, the end-result is the same - there is no forward
         * progress.
         */
        arch_spin_lock(&lock->raw_lock);
}
#endif
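
/*
 * Note on the disabled loop above: loops_per_jiffy is calibrated as
 * __delay() iterations per jiffy, so loops_per_jiffy * HZ iterations of
 * __delay(1) amount to roughly one second of spinning before a lockup
 * is reported.
 */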

void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
#if 0 /* Temporarily commented out to debug an hrtimer spinlock issue */
        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
#else
        arch_spin_lock(&lock->raw_lock);
#endif
        debug_spin_lock_after(lock);
}
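
/*
 * Call-path note (typical, configuration-dependent): with
 * CONFIG_DEBUG_SPINLOCK=y the generic spin_lock()/raw_spin_lock()
 * entry points reach do_raw_spin_lock() above, so every acquisition
 * runs the magic/owner checks around the arch-level lock.
 */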

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}
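
/*
 * Why the UP check above holds: on a uniprocessor kernel there is no
 * other CPU that could hold the lock, and spinlock paths run with
 * preemption disabled, so a failed trylock can only mean corrupted
 * lock state.
 */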

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
        debug_spin_unlock(lock);
        arch_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
               msg, raw_smp_processor_id(), current->comm,
               task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
                               "%s/%d, %p\n",
                               raw_smp_processor_id(), current->comm,
                               current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

void do_raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
        int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_unlock(&lock->raw_lock);
}
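
/*
 * The read-side paths above check only the magic value: an rwlock may
 * be held by several readers at once, so there is no single owner or
 * owner_cpu to record. Owner tracking is done only on the write side,
 * below.
 */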

static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                      lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                      lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

#if 0 /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
                               "%s/%d, %p\n",
                               raw_smp_processor_id(), current->comm,
                               current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

void do_raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
        int ret = arch_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
}
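
/*
 * Usage sketch (illustrative, not part of this file): these hooks sit
 * underneath the generic rwlock API, so with CONFIG_DEBUG_SPINLOCK=y:
 *
 *        DEFINE_RWLOCK(my_rwlock);        (hypothetical lock)
 *        write_lock(&my_rwlock);          -> do_raw_write_lock() with checks
 *        write_unlock(&my_rwlock);        -> do_raw_write_unlock()
 */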