  1. #ifndef _S390_RWSEM_H
  2. #define _S390_RWSEM_H
  3. /*
  4. * include/asm-s390/rwsem.h
  5. *
  6. * S390 version
  7. * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
  8. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  9. *
  10. * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
  11. */
  12. /*
  13. *
  14. * The MSW of the count is the negated number of active writers and waiting
  15. * lockers, and the LSW is the total number of active locks
  16. *
  17. * The lock count is initialized to 0 (no active and no waiting lockers).
  18. *
  19. * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
  20. * uncontended lock. This can be determined because XADD returns the old value.
  21. * Readers increment by 1 and see a positive value when uncontended, negative
  22. * if there are writers (and maybe) readers waiting (in which case it goes to
  23. * sleep).
  24. *
  25. * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
  26. * be extended to 65534 by manually checking the whole MSW rather than relying
  27. * on the S flag.
  28. *
  29. * The value of ACTIVE_BIAS supports up to 65535 active processes.
  30. *
  31. * This should be totally fair - if anything is waiting, a process that wants a
  32. * lock will go to the back of the queue. When the currently active lock is
  33. * released, if there's a writer at the front of the queue, then that and only
  34. that will be woken up; if there's a bunch of consecutive readers at the
  35. * front, then they'll all be woken up, but no other readers will be.
  36. */
  37. #ifndef _LINUX_RWSEM_H
  38. #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
  39. #endif
  40. #ifdef __KERNEL__
  41. #ifndef __s390x__
  42. #define RWSEM_UNLOCKED_VALUE 0x00000000
  43. #define RWSEM_ACTIVE_BIAS 0x00000001
  44. #define RWSEM_ACTIVE_MASK 0x0000ffff
  45. #define RWSEM_WAITING_BIAS (-0x00010000)
  46. #else /* __s390x__ */
  47. #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
  48. #define RWSEM_ACTIVE_BIAS 0x0000000000000001L
  49. #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
  50. #define RWSEM_WAITING_BIAS (-0x0000000100000000L)
  51. #endif /* __s390x__ */
  52. #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
  53. #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * lock for reading
 *
 * Fast path: atomically add RWSEM_ACTIVE_READ_BIAS (+1) to sem->count
 * with a compare-and-swap retry loop (CS on 31-bit, CSG on 64-bit).
 * "jl 0b" re-reads and retries whenever the compare-and-swap loses a
 * race with another CPU.  A negative old count means a writer is
 * active or waiting, so fall through to the sleeping slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" ahi %1,%4\n"		/* new += RWSEM_ACTIVE_READ_BIAS */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry with fresh old */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" aghi %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	if (old < 0)
		rwsem_down_read_failed(sem);	/* contended: sleep */
}
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Same CS/CSG loop as __down_read(), except the count is load-and-
 * tested first (LTR/LTGR) and "jm 1f" (jump on minus) bails out
 * immediately when the count is negative, i.e. a writer is active or
 * waiting — a trylock never sleeps.
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: ltr %1,%0\n"	/* new = old, set condition code */
		" jm 1f\n"		/* negative -> contended, give up */
		" ahi %1,%4\n"		/* new += RWSEM_ACTIVE_READ_BIAS */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b\n"		/* CS failed -> re-test fresh old */
		"1:"
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: ltgr %1,%0\n"
		" jm 1f\n"
		" aghi %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b\n"
		"1:"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	/* non-negative old count means we got the read lock */
	return old >= 0 ? 1 : 0;
}
/*
 * lock for writing
 *
 * Atomically add RWSEM_ACTIVE_WRITE_BIAS to sem->count.  The write
 * bias is too large for the signed 16-bit immediate of AHI/AGHI, so
 * it is staged in the local @tmp and added from memory (A/AG) instead.
 * Only an old count of 0 (completely unlocked) means the write lock
 * was taken uncontended; anything else goes to the slow path.
 *
 * @subclass is accepted for the nested-locking API but not used in
 * this fast path.
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" a %1,%4\n"		/* new += RWSEM_ACTIVE_WRITE_BIAS */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" ag %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (old != 0)
		rwsem_down_write_failed(sem);	/* contended: sleep */
}
/* lock for writing, without a lockdep subclass */
static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * The write lock can only be taken when the count is exactly 0, so the
 * loop tests for zero (LTR/LTGR + JNZ) and then compare-and-swaps the
 * full write bias in.  On CS failure %0 is reloaded with the current
 * count, so "jl 0b" re-tests the fresh value.  Never sleeps.
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	asm volatile(
#ifndef __s390x__
		" l %0,%1\n"		/* old = sem->count */
		"0: ltr %0,%0\n"	/* test for zero */
		" jnz 1f\n"		/* non-zero -> contended, give up */
		" cs %0,%3,%1\n"	/* 0 -> RWSEM_ACTIVE_WRITE_BIAS */
		" jl 0b\n"		/* CS failed -> re-test fresh old */
#else /* __s390x__ */
		" lg %0,%1\n"
		"0: ltgr %0,%0\n"
		" jnz 1f\n"
		" csg %0,%3,%1\n"
		" jl 0b\n"
#endif /* __s390x__ */
		"1:"
		: "=&d" (old), "=Q" (sem->count)
		: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
		: "cc", "memory");
	/* we got the lock iff the count we swapped from was unlocked */
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}
/*
 * unlock after reading
 *
 * Atomically subtract RWSEM_ACTIVE_READ_BIAS (the "i" operand is the
 * negated bias, so AHI/AGHI adds -1).  If the resulting count is
 * negative there are waiters queued; when additionally no active
 * lockers remain (active mask bits all clear) we were the last reader
 * out and must wake the front of the queue.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" ahi %1,%4\n"		/* new -= RWSEM_ACTIVE_READ_BIAS */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" aghi %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory");
	if (new < 0)				/* waiters present? */
		if ((new & RWSEM_ACTIVE_MASK) == 0)	/* last active locker? */
			rwsem_wake(sem);
}
/*
 * unlock after writing
 *
 * Atomically subtract RWSEM_ACTIVE_WRITE_BIAS.  As in
 * __down_write_nested(), the bias does not fit an add-halfword
 * immediate, so the negated bias is staged in @tmp and added from
 * memory (A/AG).  A negative result with no active lockers left means
 * waiters are queued and must be woken.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" a %1,%4\n"		/* new -= RWSEM_ACTIVE_WRITE_BIAS */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" ag %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new < 0)				/* waiters present? */
		if ((new & RWSEM_ACTIVE_MASK) == 0)	/* no active lockers left */
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 *
 * Atomically add -RWSEM_WAITING_BIAS, turning the held write bias
 * (WAITING_BIAS + ACTIVE_BIAS) into a plain read bias (ACTIVE_BIAS)
 * without ever releasing the lock.  If the resulting count is greater
 * than 1 there is more than our own active bias in the count —
 * i.e. other tasks are queued — so wake the waiting readers.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" a %1,%4\n"		/* new -= RWSEM_WAITING_BIAS */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" ag %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "m" (tmp)
		: "cc", "memory");
	if (new > 1)
		rwsem_downgrade_wake(sem);
}
/*
 * implement atomic add functionality
 *
 * Atomically add @delta to sem->count via a CS/CSG retry loop.
 * Unlike rwsem_atomic_update() the resulting value is discarded.
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" ar %1,%4\n"		/* new += delta */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" agr %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
}
/*
 * implement exchange and add functionality
 *
 * Atomically add @delta to sem->count via a CS/CSG retry loop and
 * return the new (post-add) value of the count.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	asm volatile(
#ifndef __s390x__
		" l %0,%2\n"		/* old = sem->count */
		"0: lr %1,%0\n"		/* new = old */
		" ar %1,%4\n"		/* new += delta */
		" cs %0,%1,%2\n"	/* try to swap it in */
		" jl 0b"		/* CS failed -> retry */
#else /* __s390x__ */
		" lg %0,%2\n"
		"0: lgr %1,%0\n"
		" agr %1,%4\n"
		" csg %0,%1,%2\n"
		" jl 0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
		: "Q" (sem->count), "d" (delta)
		: "cc", "memory");
	return new;
}
  297. #endif /* __KERNEL__ */
  298. #endif /* _S390_RWSEM_H */