/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

/*
 * The only locking implemented here uses SH-4A opcodes. For others,
 * split this out as per atomic-*.h.
 */
#ifndef CONFIG_CPU_SH4A
#error "Need movli.l/movco.l for spinlocks"
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define arch_spin_is_locked(x)		((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
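
/*
 * Lock word convention used throughout this file: lock == 1 means
 * unlocked, lock <= 0 means held (the lock path stores 0, the unlock
 * path stores 1), which is why arch_spin_is_locked() tests for <= 0.
 */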

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
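
/*
 * All of the primitives below follow the same LL/SC pattern: movli.l
 * loads the lock word and sets the link (LDST) bit, and the paired
 * movco.l only succeeds (T = 1) while the reservation is still intact,
 * so "bf 1b" retries the sequence when another CPU got in first.  The
 * trylock paths end with synco, the SH-4A synchronization barrier.
 */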

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
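
/*
 * Unlock is a plain store of 1 (the "unlocked" value); the "memory"
 * clobber keeps the compiler from sinking critical-section accesses
 * below it.
 */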
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0	! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)	((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
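
/*
 * The rwlock count starts at RW_LOCK_BIAS.  Each reader subtracts 1 and
 * a writer subtracts the whole bias, so a positive count means readers
 * may still enter and a count of exactly RW_LOCK_BIAS means the lock is
 * completely free.
 */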

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0	! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */