/* rwsem.h */
  1. #ifndef _ALPHA_RWSEM_H
  2. #define _ALPHA_RWSEM_H
  3. /*
  4. * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
  5. * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
  6. */
  7. #ifndef _LINUX_RWSEM_H
  8. #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
  9. #endif
  10. #ifdef __KERNEL__
  11. #include <linux/compiler.h>
/*
 * The 64-bit count encodes two fields: the low 32 bits hold the active
 * count (number of readers, or one writer), the high 32 bits hold a
 * negative bias that is set while any task is waiting.  Hence:
 *   count == 0  -> unlocked
 *   count  > 0  -> readers active, nobody waiting
 *   count  < 0  -> a writer holds it and/or tasks are queued
 */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * Lock for reading: atomically add the read bias to ->count.  If the
 * previous value was negative, a writer holds the lock or tasks are
 * queued, so take the slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write suffices. */
	oldcount = sem->count.counter;
	sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * SMP: Alpha load-locked/store-conditional loop.  ldq_l loads the
	 * old count into %0, addq computes old+bias into %2, stq_c tries
	 * to store it and clears %2 if the reservation was lost, in which
	 * case beq branches to the out-of-line retry stub (.subsection 2
	 * keeps it off the hot path).  The trailing mb orders the acquire
	 * before subsequent memory accesses.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	/* oldcount is the pre-increment value; negative => contended. */
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
  41. /*
  42. * trylock for reading -- returns 1 if successful, 0 if contention
  43. */
  44. static inline int __down_read_trylock(struct rw_semaphore *sem)
  45. {
  46. long old, new, res;
  47. res = atomic_long_read(&sem->count);
  48. do {
  49. new = res + RWSEM_ACTIVE_READ_BIAS;
  50. if (new <= 0)
  51. break;
  52. old = res;
  53. res = atomic_long_cmpxchg(&sem->count, old, new);
  54. } while (res != old);
  55. return res >= 0 ? 1 : 0;
  56. }
/*
 * Common write-lock step: atomically add the write bias
 * (waiting + active) to ->count and return the previous value.
 * A return of 0 means the semaphore was unlocked and the caller
 * now owns it; anything else sends the caller to a slow path.
 */
static inline long ___down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	/* UP: no cross-CPU race, a plain read-modify-write suffices. */
	oldcount = sem->count.counter;
	sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	/*
	 * LL/SC add of the write bias; same structure as __down_read:
	 * retry stub out of line in .subsection 2, mb after a successful
	 * store for acquire ordering.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	return oldcount;
}
  79. static inline void __down_write(struct rw_semaphore *sem)
  80. {
  81. if (unlikely(___down_write(sem)))
  82. rwsem_down_write_failed(sem);
  83. }
  84. static inline int __down_write_killable(struct rw_semaphore *sem)
  85. {
  86. if (unlikely(___down_write(sem)))
  87. if (IS_ERR(rwsem_down_write_failed_killable(sem)))
  88. return -EINTR;
  89. return 0;
  90. }
  91. /*
  92. * trylock for writing -- returns 1 if successful, 0 if contention
  93. */
  94. static inline int __down_write_trylock(struct rw_semaphore *sem)
  95. {
  96. long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  97. RWSEM_ACTIVE_WRITE_BIAS);
  98. if (ret == RWSEM_UNLOCKED_VALUE)
  99. return 1;
  100. return 0;
  101. }
/*
 * Unlock after reading: subtract the read bias.  If the count was
 * negative (waiters queued) and the active part was exactly one read
 * bias -- i.e. we were the last active reader -- wake the waiters.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	oldcount = sem->count.counter;
	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * mb comes FIRST here (release): the critical section's accesses
	 * must complete before the count is dropped.  Then the usual
	 * LL/SC loop subtracts the read bias.
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	/* oldcount is the pre-decrement value: negative => waiters exist;
	 * low 32 bits equal to one read bias => we were the last reader. */
	if (unlikely(oldcount < 0))
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
/*
 * Unlock after writing: subtract the whole write bias.  If the
 * resulting count is nonzero (waiting bias still set) while its active
 * part is zero, waiters are queued with nobody holding the lock, so
 * wake them.  Note: unlike the other helpers, "count" here holds the
 * NEW value, not the old one.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef CONFIG_SMP
	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count.counter;
#else
	long temp;
	/*
	 * Leading mb = release ordering.  After a successful stq_c, %0
	 * still holds the OLD count, so the extra "subq %0,%3,%0" on the
	 * success path recomputes the new value for the check below.
	 */
	__asm__ __volatile__(
	" mb\n"
	"1: ldq_l %0,%1\n"
	" subq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" subq %0,%3,%0\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	/* count != 0 with a zero active (low 32-bit) part => waiters only. */
	if (unlikely(count))
		if ((int)count == 0)
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 *
 * Removes the waiting-bias component of the write bias, turning
 * WAITING_BIAS + ACTIVE_BIAS into a single read bias.  If the old
 * count was negative there are queued tasks, so the slow path wakes
 * any waiting readers.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef CONFIG_SMP
	oldcount = sem->count.counter;
	sem->count.counter -= RWSEM_WAITING_BIAS;
#else
	long temp;
	/*
	 * LL/SC loop adding -RWSEM_WAITING_BIAS (i.e. subtracting the
	 * waiting bias); mb after a successful store, retry stub out of
	 * line in .subsection 2, as in the other helpers.
	 */
	__asm__ __volatile__(
	"1: ldq_l %0,%1\n"
	" addq %0,%3,%2\n"
	" stq_c %2,%1\n"
	" beq %2,2f\n"
	" mb\n"
	".subsection 2\n"
	"2: br 1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	/* Pre-update count negative => tasks are queued; let them run. */
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
  177. #endif /* __KERNEL__ */
  178. #endif /* _ALPHA_RWSEM_H */