cmpxchg.h

/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>
/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \
static inline unsigned long __xchg_case_##name(unsigned long x, \
					       volatile void *ptr) \
{ \
	unsigned long ret, tmp; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	"	prfm	pstl1strm, %2\n" \
	"1:	ld" #acq "xr" #sz "\t%" #w "0, %2\n" \
	"	st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \
	"	cbnz	%w1, 1b\n" \
	"	" #mb, \
	/* LSE atomics */ \
	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
		__nops(3) \
	"	" #nop_lse) \
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
	: "r" (x) \
	: cl); \
 \
	return ret; \
}
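
/*
 * Editorial sketch (not part of the original header): for the fully-ordered
 * 32-bit instantiation below, __XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l,
 * "memory"), the LL/SC alternative roughly assembles to
 *
 *	prfm	pstl1strm, %2
 * 1:	ldxr	%w0, %2
 *	stlxr	%w1, %w3, %2
 *	cbnz	%w1, 1b
 *	dmb	ish
 *
 * while the LSE alternative is a single "swpal %w3, %w0, %2" padded with
 * nops so both sequences occupy the same number of instructions and can be
 * patched in place at boot.
 */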
__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
#define __XCHG_GEN(sfx) \
static inline unsigned long __xchg##sfx(unsigned long x, \
					volatile void *ptr, \
					int size) \
{ \
	switch (size) { \
	case 1: \
		return __xchg_case##sfx##_1(x, ptr); \
	case 2: \
		return __xchg_case##sfx##_2(x, ptr); \
	case 4: \
		return __xchg_case##sfx##_4(x, ptr); \
	case 8: \
		return __xchg_case##sfx##_8(x, ptr); \
	default: \
		BUILD_BUG(); \
	} \
 \
	unreachable(); \
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN
#define __xchg_wrapper(sfx, ptr, x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
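
/*
 * Usage sketch (editorial example, not part of the original header): xchg()
 * atomically stores the new value and returns the old one with full ordering;
 * the _relaxed/_acquire/_release variants weaken the ordering accordingly.
 *
 *	static unsigned long example_flag;
 *
 *	static void example_set_flag(void)
 *	{
 *		unsigned long old = xchg(&example_flag, 1UL);
 *
 *		if (old == 0)
 *			pr_debug("we were first to set the flag\n");
 *	}
 */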
#define __CMPXCHG_GEN(sfx) \
static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
					   unsigned long old, \
					   unsigned long new, \
					   int size) \
{ \
	switch (size) { \
	case 1: \
		return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
	case 2: \
		return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
	case 4: \
		return __cmpxchg_case##sfx##_4(ptr, old, new); \
	case 8: \
		return __cmpxchg_case##sfx##_8(ptr, old, new); \
	default: \
		BUILD_BUG(); \
	} \
 \
	unreachable(); \
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN
#define __cmpxchg_wrapper(sfx, ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg##sfx((ptr), (unsigned long)(o), \
			       (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed
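
/*
 * Usage sketch (editorial example): cmpxchg() installs "new" only if the
 * location still holds "old" and returns the value it observed, so callers
 * typically retry in a loop until the observed value matches the expected one.
 *
 *	static int example_add_unless_zero(atomic_t *v, int a)
 *	{
 *		int cur = atomic_read(v);
 *
 *		while (cur != 0) {
 *			int seen = cmpxchg(&v->counter, cur, cur + a);
 *
 *			if (seen == cur)
 *				return 1;	// swap happened
 *			cur = seen;		// value changed under us, retry
 *		}
 *		return 0;
 *	}
 */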
/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2) \
({ \
	if (sizeof(*(ptr1)) != 8) \
		BUILD_BUG(); \
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({ \
	int __ret; \
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({ \
	int __ret; \
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})
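
/*
 * Usage sketch (editorial example): cmpxchg_double() compares and swaps two
 * adjacent 64-bit words as one atomic operation (e.g. a pointer plus a
 * sequence count) and returns non-zero on success. The two pointers must
 * reference consecutive doublewords, which __cmpxchg_double_check() enforces.
 *
 *	struct example_pair {
 *		void *ptr;
 *		unsigned long seq;	// directly follows ptr
 *	};
 *
 *	static bool example_update(struct example_pair *p, void *old_ptr,
 *				   unsigned long old_seq, void *new_ptr)
 *	{
 *		return cmpxchg_double(&p->ptr, &p->seq,
 *				      old_ptr, old_seq,
 *				      new_ptr, old_seq + 1);
 *	}
 */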
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
	typeof(*raw_cpu_ptr(&(pcp))) __ret; \
	preempt_disable(); \
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
	preempt_enable(); \
	__ret; \
})

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
	int __ret; \
	preempt_disable(); \
	__ret = cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), \
				     raw_cpu_ptr(&(ptr2)), \
				     o1, o2, n1, n2); \
	preempt_enable(); \
	__ret; \
})
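
/*
 * Editorial note: the this_cpu_* variants above only need to be atomic with
 * respect to code on the same CPU, so a relaxed cmpxchg_local() under
 * preempt_disable()/preempt_enable() suffices; disabling preemption keeps the
 * task from migrating between reading the per-CPU pointer and performing the
 * compare-and-swap, and no cross-CPU ordering is implied.
 */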
#define __CMPWAIT_CASE(w, sz, name) \
static inline void __cmpwait_case_##name(volatile void *ptr, \
					 unsigned long val) \
{ \
	unsigned long tmp; \
 \
	asm volatile( \
	"	sevl\n" \
	"	wfe\n" \
	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
	"	cbnz	%" #w "[tmp], 1f\n" \
	"	wfe\n" \
	"1:" \
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr) \
	: [val] "r" (val)); \
}
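
/*
 * Editorial note on the sequence above: "sevl" sets the local event register
 * so the first "wfe" falls through immediately; the exclusive load then arms
 * the exclusive monitor, and if the loaded value already differs from "val"
 * the final "wfe" is skipped. Otherwise the CPU sleeps in "wfe" until the
 * monitor is cleared (typically by another CPU writing the location) or some
 * other event arrives, after which the caller re-checks its condition.
 */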
__CMPWAIT_CASE(w, b, 1);
__CMPWAIT_CASE(w, h, 2);
__CMPWAIT_CASE(w,  , 4);
__CMPWAIT_CASE( ,  , 8);

#undef __CMPWAIT_CASE
#define __CMPWAIT_GEN(sfx) \
static inline void __cmpwait##sfx(volatile void *ptr, \
				  unsigned long val, \
				  int size) \
{ \
	switch (size) { \
	case 1: \
		return __cmpwait_case##sfx##_1(ptr, (u8)val); \
	case 2: \
		return __cmpwait_case##sfx##_2(ptr, (u16)val); \
	case 4: \
		return __cmpwait_case##sfx##_4(ptr, val); \
	case 8: \
		return __cmpwait_case##sfx##_8(ptr, val); \
	default: \
		BUILD_BUG(); \
	} \
 \
	unreachable(); \
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
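
/*
 * Usage sketch (editorial example): __cmpwait_relaxed() is meant for wait
 * loops such as smp_cond_load_acquire() in <asm/barrier.h>, letting the CPU
 * idle in WFE instead of spinning while a value remains unchanged.
 *
 *	// Hypothetical poll loop: sleep while *word still equals "seen".
 *	static unsigned int example_wait_for_change(volatile unsigned int *word)
 *	{
 *		unsigned int seen = READ_ONCE(*word);
 *
 *		while (READ_ONCE(*word) == seen)
 *			__cmpwait_relaxed(word, seen);
 *		return READ_ONCE(*word);
 *	}
 */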
#endif	/* __ASM_CMPXCHG_H */