#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
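
/*
 * Implementation notes for the asm sequences below (sketch-level; the
 * m32r manuals are the authoritative reference):
 *
 *  - "mvfc rX, psw" / "clrpsw #0x40 -> nop" save the PSW and clear its
 *    interrupt-enable bit, and "mvtc rX, psw" restores it, so the
 *    sequence cannot be interrupted on the local CPU.
 *  - "lock" / "unlock" are the interlocked load/store pair; together
 *    they make the load, modify and store one atomic read-modify-write
 *    on the lock word with respect to other CPUs.
 *  - DCACHE_CLEAR() (from <asm/dcache_clear.h>) flushes the cache line
 *    holding the lock word as a workaround on M32700 TS1 silicon; it
 *    needs a scratch register, which is why "r6" or "r7" appears in the
 *    clobber list only under CONFIG_CHIP_M32700_TS1.
 *
 * Lock word encoding: slock == 1 means unlocked, slock <= 0 means locked.
 */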

#define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        smp_cond_load_acquire(&lock->slock, VAL > 0);
}

/**
 * arch_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * arch_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        int oldval;
        unsigned long tmp1, tmp2;

        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
         * {
         *      oldval = lock->slock;  <--+ need atomic operation
         *      lock->slock = 0;       <--+
         * }
         */
        __asm__ __volatile__ (
                "# arch_spin_trylock \n\t"
                "ldi %1, #0; \n\t"
                "mvfc %2, psw; \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r6", "%3")
                "lock %0, @%3; \n\t"
                "unlock %1, @%3; \n\t"
                "mvtc %2, psw; \n\t"
                : "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&lock->slock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );

        return (oldval > 0);
}
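
/*
 * Rough portable sketch of the sequence above, kept out of the build
 * (the _sketch name is illustrative, not part of the API): since the
 * lock/unlock pair turns the load and store into one atomic
 * read-modify-write, the body behaves like an atomic exchange of 0
 * into ->slock.
 */
#if 0
static inline int arch_spin_trylock_sketch(arch_spinlock_t *lock)
{
        /* xchg() returns the previous value; 1 means the lock was free */
        return xchg(&lock->slock, 0) > 0;
}
#endif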

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp0, tmp1;

        /*
         * lock->slock :  =1 : unlock
         *             : <=0 : lock
         *
         * for ( ; ; ) {
         *      lock->slock -= 1;  <-- need atomic operation
         *      if (lock->slock == 0) break;
         *      for ( ; lock->slock <= 0 ; );
         * }
         */
        __asm__ __volatile__ (
                "# arch_spin_lock \n\t"
                ".fillinsn \n"
                "1: \n\t"
                "mvfc %1, psw; \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2; \n\t"
                "addi %0, #-1; \n\t"
                "unlock %0, @%2; \n\t"
                "mvtc %1, psw; \n\t"
                "bltz %0, 2f; \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn \n"
                "2: \n\t"
                "ld %0, @%2; \n\t"
                "bgtz %0, 1b; \n\t"
                "bra 2b; \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&lock->slock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}
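
/*
 * Rough portable sketch of arch_spin_lock(), kept out of the build
 * (illustrative name, not part of the API): atomically decrement the
 * lock word; a result >= 0 means it was free (1 -> 0) and we own it, a
 * negative result means contention, so spin read-only until the word
 * goes positive again and retry the decrement.
 */
#if 0
static inline void arch_spin_lock_sketch(arch_spinlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;     /* lock word as a counter */

        while (atomic_dec_return(count) < 0) {
                /* contended: mirror the "2:" loop above, read-only spin */
                while (READ_ONCE(lock->slock) <= 0)
                        cpu_relax();
        }
}
#endif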

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        mb();
        lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
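
/*
 * Counter encoding used below (the same scheme as the x86 code
 * described above): rw->lock starts at RW_LOCK_BIAS.  Every reader
 * subtracts 1 and adds it back on unlock, so any positive value means
 * "no writer present".  A writer subtracts the whole bias and may
 * proceed only if the result is exactly 0, i.e. there were no readers
 * and no other writer.
 */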

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1;

        /*
         * rw->lock :  >0 : unlock
         *          : <=0 : lock
         *
         * for ( ; ; ) {
         *      rw->lock -= 1;  <-- need atomic operation
         *      if (rw->lock >= 0) break;
         *      rw->lock += 1;  <-- need atomic operation
         *      for ( ; rw->lock <= 0 ; );
         * }
         */
        __asm__ __volatile__ (
                "# read_lock \n\t"
                ".fillinsn \n"
                "1: \n\t"
                "mvfc %1, psw; \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2; \n\t"
                "addi %0, #-1; \n\t"
                "unlock %0, @%2; \n\t"
                "mvtc %1, psw; \n\t"
                "bltz %0, 2f; \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn \n"
                "2: \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2; \n\t"
                "addi %0, #1; \n\t"
                "unlock %0, @%2; \n\t"
                "mvtc %1, psw; \n\t"
                ".fillinsn \n"
                "3: \n\t"
                "ld %0, @%2; \n\t"
                "bgtz %0, 1b; \n\t"
                "bra 3b; \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}
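
/*
 * Rough portable sketch of arch_read_lock(), kept out of the build
 * (illustrative name, not part of the API): take a reader slot by
 * decrementing the counter; a negative result means a writer holds the
 * lock, so give the slot back, wait for the counter to go positive and
 * retry.
 */
#if 0
static inline void arch_read_lock_sketch(arch_rwlock_t *rw)
{
        atomic_t *count = (atomic_t *)rw;

        while (atomic_dec_return(count) < 0) {
                atomic_inc(count);              /* back out our reader slot */
                while (READ_ONCE(rw->lock) <= 0)
                        cpu_relax();
        }
}
#endif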

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1, tmp2;

        /*
         * rw->lock :  =RW_LOCK_BIAS_STR : unlock
         *          : !=RW_LOCK_BIAS_STR : lock
         *
         * for ( ; ; ) {
         *      rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
         *      if (rw->lock == 0) break;
         *      rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
         *      for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
         * }
         */
        __asm__ __volatile__ (
                "# write_lock \n\t"
                "seth %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
                "or3 %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
                ".fillinsn \n"
                "1: \n\t"
                "mvfc %2, psw; \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock %0, @%3; \n\t"
                "sub %0, %1; \n\t"
                "unlock %0, @%3; \n\t"
                "mvtc %2, psw; \n\t"
                "bnez %0, 2f; \n\t"
                LOCK_SECTION_START(".balign 4 \n\t")
                ".fillinsn \n"
                "2: \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock %0, @%3; \n\t"
                "add %0, %1; \n\t"
                "unlock %0, @%3; \n\t"
                "mvtc %2, psw; \n\t"
                ".fillinsn \n"
                "3: \n\t"
                "ld %0, @%3; \n\t"
                "beq %0, %1, 1b; \n\t"
                "bra 3b; \n\t"
                LOCK_SECTION_END
                : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r7"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}
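
/*
 * Rough portable sketch of arch_write_lock(), kept out of the build
 * (illustrative name, not part of the API): subtract the whole bias;
 * only a result of 0 means the lock was completely idle.  Otherwise
 * restore the bias and wait until the counter reads RW_LOCK_BIAS again
 * before retrying.
 */
#if 0
static inline void arch_write_lock_sketch(arch_rwlock_t *rw)
{
        atomic_t *count = (atomic_t *)rw;

        while (atomic_sub_return(RW_LOCK_BIAS, count) != 0) {
                atomic_add(RW_LOCK_BIAS, count);        /* undo the attempt */
                while (READ_ONCE(rw->lock) != RW_LOCK_BIAS)
                        cpu_relax();
        }
}
#endif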

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1;

        __asm__ __volatile__ (
                "# read_unlock \n\t"
                "mvfc %1, psw; \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r6", "%2")
                "lock %0, @%2; \n\t"
                "addi %0, #1; \n\t"
                "unlock %0, @%2; \n\t"
                "mvtc %1, psw; \n\t"
                : "=&r" (tmp0), "=&r" (tmp1)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp0, tmp1, tmp2;

        __asm__ __volatile__ (
                "# write_unlock \n\t"
                "seth %1, #high(" RW_LOCK_BIAS_STR "); \n\t"
                "or3 %1, %1, #low(" RW_LOCK_BIAS_STR "); \n\t"
                "mvfc %2, psw; \n\t"
                "clrpsw #0x40 -> nop; \n\t"
                DCACHE_CLEAR("%0", "r7", "%3")
                "lock %0, @%3; \n\t"
                "add %0, %1; \n\t"
                "unlock %0, @%3; \n\t"
                "mvtc %2, psw; \n\t"
                : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
                : "r" (&rw->lock)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r7"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */