#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <linux/bug.h>

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif
static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}
#define xchg_local(ptr,x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

#define xchg_relaxed(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})
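
/*
 * Illustrative sketch of how xchg_relaxed() might be used: atomically take
 * a pending-flags word while clearing it, with no ordering implied. The
 * names here (example_take_flags, pending) are hypothetical and exist only
 * for the sake of the example.
 */
static __always_inline u32 example_take_flags(u32 *pending)
{
	/* Swap in 0 and return whatever flags were previously set. */
	return xchg_relaxed(pending, 0);
}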
/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}
/*
 * The cmpxchg family provides no ordering guarantee when the compare part
 * fails, so we can avoid superfluous barriers by implementing cmpxchg() and
 * cmpxchg_acquire() in assembly. We don't do the same for cmpxchg_release(),
 * because that would put a barrier in the middle of a ll/sc loop, which is
 * probably a bad idea - for example, it might make the conditional store
 * more likely to fail.
 */
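
/*
 * Sketch of the alternative for a release form (illustration only; the exact
 * barrier is chosen by the generic atomics layer, not by this file): place
 * the barrier before the whole ll/sc loop rather than inside it, roughly
 *
 *	smp_mb__before_atomic();
 *	prev = cmpxchg_relaxed(p, old, new);
 *
 * which keeps the lwarx/stwcx. sequence itself barrier-free.
 */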
static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
	"\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif
static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})
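
/*
 * Illustrative sketch of how cmpxchg() might be used: claim a slot by
 * installing a non-zero owner id only if the slot is currently free. The
 * names (example_try_claim, owner, id) are hypothetical and only serve to
 * show the compare-old/install-new pattern with full barrier semantics.
 */
static __always_inline int example_try_claim(unsigned int *owner,
					     unsigned int id)
{
	/* Succeeds (returns 1) only if *owner was 0 and is now 'id'. */
	return cmpxchg(owner, 0U, id) == 0U;
}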
#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#define cmpxchg64_relaxed(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
#define cmpxchg64_acquire(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_acquire((ptr), (o), (n));				\
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */