spinlock_64.h

/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/processor.h>
#include <asm/barrier.h>

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code. They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants. The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */
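
/* The lock is a single byte: 0 means free, non-zero (the 0xff written
 * by ldstub) means held.
 */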
#define arch_spin_is_locked(lp) ((lp)->lock != 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        smp_cond_load_acquire(&lock->lock, !VAL);
}
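
/* Acquire: ldstub atomically fetches the lock byte and stores 0xff into
 * it.  If the old value was non-zero the lock was already held; spin
 * out of line (.subsection 2) on a plain ldub until the byte reads as
 * zero, then branch back and retry the ldstub.
 */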
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldstub          [%1], %0\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldub            [%1], %0\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 1b\n"
"       .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}
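
/* Try-lock: a single ldstub; the old value tells us whether we got the
 * lock (0) or it was already held (non-zero).
 */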
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long result;

        __asm__ __volatile__(
"       ldstub          [%1], %0\n"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}
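
/* Release: clear the lock byte.  The "memory" clobber keeps the
 * compiler from moving memory accesses across the store.
 */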
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __asm__ __volatile__(
"       stb             %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}
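
/* Like arch_spin_lock(), but if we have to spin we first save the
 * current %pil and drop back to the caller's pre-disable level (flags)
 * so interrupts can be serviced while waiting; the saved %pil is
 * written back (in the branch delay slot) before retrying the ldstub.
 */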
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:     ldstub          [%2], %0\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     rdpr            %%pil, %1\n"
"       wrpr            %3, %%pil\n"
"3:     ldub            [%2], %0\n"
"       brnz,pt         %0, 3b\n"
"        nop\n"
"       ba,pt           %%xcc, 1b\n"
"        wrpr           %1, %%pil\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r"(lock), "r"(flags)
        : "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
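/* The lock word holds a reader count in the low 31 bits; bit 31
 * (0x80000000) marks a writer, so a signed load sees a negative value
 * while a writer holds the lock.  Readers take the lock by cas'ing in
 * count + 1, spinning out of line while the word is negative.
 */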
static inline void arch_read_lock(arch_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__ (
"1:     ldsw            [%2], %0\n"
"       brlz,pn         %0, 2f\n"
"4:      add            %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldsw            [%2], %0\n"
"       brlz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
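
/* One attempt: fail (return 0) as soon as a writer is seen, otherwise
 * cas in count + 1 and return 1.  Only a cas race forces a re-read.
 */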
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        int tmp1, tmp2;

        __asm__ __volatile__ (
"1:     ldsw            [%2], %0\n"
"       brlz,a,pn       %0, 2f\n"
"        mov            0, %0\n"
"       add             %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        mov            1, %0\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");

        return tmp1;
}
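
/* Drop a reader: cas-loop the count down by one. */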
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"1:     lduw            [%2], %0\n"
"       sub             %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%xcc, 1b\n"
"        nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}
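
/* Writers wait (out of line) until the whole word is zero -- no readers
 * and no other writer -- then cas in the writer bit (0x80000000).
 */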
static inline void arch_write_lock(arch_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"1:     lduw            [%2], %0\n"
"       brnz,pn         %0, 2f\n"
"4:      or             %0, %3, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     lduw            [%2], %0\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}
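
/* Writer release: the writer excludes all readers, so clearing the
 * whole word is enough.
 */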
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
"       stw             %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}
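
/* One attempt at the writer bit: return 0 if the word is non-zero,
 * otherwise cas 0 -> 0x80000000 and return 1, retrying only if the cas
 * itself raced.
 */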
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        __asm__ __volatile__(
"       mov             0, %2\n"
"1:     lduw            [%3], %0\n"
"       brnz,pn         %0, 2f\n"
"        or             %0, %4, %1\n"
"       cas             [%3], %0, %1\n"
"       cmp             %0, %1\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       mov             1, %2\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}
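
/* The rwlock *_lock_flags() variants ignore the saved flags.
 * arch_read_can_lock() only needs the writer bit clear, while
 * arch_write_can_lock() needs the whole word clear.
 */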
#define arch_read_lock_flags(p, f)      arch_read_lock(p)
#define arch_write_lock_flags(p, f)     arch_write_lock(p)

#define arch_read_can_lock(rw)          (!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw)         (!(rw)->lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */