atomic.h

/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __ATOMIC_NO_BARRIER	"\n"
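
/*
 * Two implementations of __ATOMIC_LOOP follow.  On z196 or newer
 * machines the interlocked-access facility instructions (laa, lao,
 * lan, lax) perform the operation directly on the memory operand and
 * return the old value; "bcr 14,0" serves as the memory barrier.
 * On older machines the same effect is achieved with a
 * compare-and-swap (cs) retry loop.
 */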
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_XOR	"lax"
#define __ATOMIC_BARRIER "bcr 14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val;							\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_XOR	"xr"
#define __ATOMIC_BARRIER "\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val, new_val;						\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
}
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
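
/*
 * ATOMIC_OPS generates atomic_and/or/xor (no return value, no barrier)
 * and atomic_fetch_and/or/xor (return the old value, with barrier) from
 * the same __ATOMIC_LOOP template.
 */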
#define ATOMIC_OPS(op, OP)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
}

ATOMIC_OPS(and, AND)
ATOMIC_OPS(or, OR)
ATOMIC_OPS(xor, XOR)

#undef ATOMIC_OPS
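
/*
 * atomic_xchg is built on the generic xchg(); atomic_cmpxchg maps
 * directly onto the compare-and-swap instruction, which leaves the
 * previous counter value in "old".
 */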
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}
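
/*
 * __atomic_add_unless: add "a" to the counter unless it currently
 * holds "u"; returns the value seen before the (possible) addition.
 */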
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#define __ATOMIC64_NO_BARRIER	"\n"
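
/*
 * The atomic64_t operations below mirror the 32-bit ones, using the
 * 64-bit instruction forms: laag/laog/lang/laxg on z196 or newer,
 * otherwise a csg (64-bit compare-and-swap) retry loop.
 */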
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_XOR	"laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val;						\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_XOR	"xgr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val, new_val;					\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
}
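
/*
 * As in atomic_add() above, a small compile-time constant addend is
 * applied with a single "agsi" (add signed immediate to 64-bit storage)
 * instruction on the z196 path.
 */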
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#define ATOMIC64_OPS(op, OP)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
}									\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
{									\
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
}

ATOMIC64_OPS(and, AND)
ATOMIC64_OPS(or, OR)
ATOMIC64_OPS(xor, XOR)

#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP
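
/*
 * atomic64_add_unless: add "i" to the counter unless it currently holds
 * "u"; returns non-zero if the addition was performed.
 */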
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}
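
/*
 * atomic64_dec_if_positive: decrement the counter only if the result
 * stays non-negative; returns the decremented value (which is negative
 * when no decrement took place).
 */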
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */