atomic_lse.h

/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
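/*
 * Each operation below comes in two forms: an out-of-line LL/SC
 * (load-exclusive/store-exclusive) fallback and an inline ARMv8.1 LSE
 * atomic sequence.  ARM64_LSE_ATOMIC_INSN (from <asm/lse.h>) emits the
 * call to the LL/SC version by default and, through the alternatives
 * framework, patches in the LSE code on CPUs that advertise the LSE
 * atomics.  The explicit register variables pin each argument to the
 * register the out-of-line LL/SC routine expects (w0/x0 for the value,
 * x1 for the atomic_t pointer), and __LL_SC_CLOBBERS lists the
 * registers that call may clobber.
 */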
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
        " " #asm_op " %w[i], %[v]\n") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP
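/*
 * Illustrative expansion (not part of the original file): with the LSE
 * template selected, ATOMIC_OP(add, stadd) above reduces to roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		register int w0 asm ("w0") = i;
 *		register atomic_t *x1 asm ("x1") = v;
 *
 *		asm volatile("stadd %w[i], %[v]"
 *			     : [i] "+r" (w0), [v] "+Q" (v->counter)
 *			     : "r" (x1)
 *			     : __LL_SC_CLOBBERS);
 *	}
 *
 * i.e. a single store-add with no value returned to the caller.
 */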
#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_##op##name), \
        /* LSE atomics */ \
        " " #asm_op #mb " %w[i], %w[i], %[v]") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return w0; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
        ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \
        ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \
        ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \
        ATOMIC_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS
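/*
 * Every fetch op is generated in four orderings: _relaxed (no suffix
 * on the instruction), _acquire ("a"), _release ("l") and fully
 * ordered ("al").  Only the ordered variants pass a "memory" clobber,
 * since the relaxed form does not need to constrain how the compiler
 * orders surrounding accesses.
 */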
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(add_return##name) \
        __nops(1), \
        /* LSE atomics */ \
        " ldadd" #mb " %w[i], w30, %[v]\n" \
        " add %w[i], %w[i], w30") \
        : [i] "+r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return w0; \
}

ATOMIC_OP_ADD_RETURN(_relaxed, )
ATOMIC_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC_OP_ADD_RETURN(_release, l, "memory")
ATOMIC_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC_OP_ADD_RETURN
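/*
 * Note on w30: ldadd writes the old value of the counter into its
 * second register, and the trailing add turns that into old + i, the
 * value add_return must hand back.  w30 (the link register) is a safe
 * scratch here because the LL/SC alternative is a function call, so
 * x30 is already in __LL_SC_CLOBBERS.  The sub_return and 64-bit
 * variants below use the same trick.
 */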
static inline void atomic_and(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(and)
        __nops(1),
        /* LSE atomics */
        " mvn %w[i], %w[i]\n"
        " stclr %w[i], %[v]")
        : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_and##name) \
        __nops(1), \
        /* LSE atomics */ \
        " mvn %w[i], %w[i]\n" \
        " ldclr" #mb " %w[i], %w[i], %[v]") \
        : [i] "+&r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return w0; \
}

ATOMIC_FETCH_OP_AND(_relaxed, )
ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC_FETCH_OP_AND(_release, l, "memory")
ATOMIC_FETCH_OP_AND( , al, "memory")

#undef ATOMIC_FETCH_OP_AND
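/*
 * LSE has no atomic AND, only atomic bit-clear (stclr/ldclr), so AND
 * with a mask is done by clearing the complement: mvn inverts %w[i]
 * before the atomic runs.  That early write is also why [i] carries
 * the "+&r" earlyclobber constraint -- the register is overwritten
 * before the inputs are consumed, so the compiler must not give an
 * input operand the same register.
 */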
static inline void atomic_sub(int i, atomic_t *v)
{
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC(sub)
        __nops(1),
        /* LSE atomics */
        " neg %w[i], %w[i]\n"
        " stadd %w[i], %[v]")
        : [i] "+&r" (w0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(sub_return##name) \
        __nops(2), \
        /* LSE atomics */ \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], w30, %[v]\n" \
        " add %w[i], %w[i], w30") \
        : [i] "+&r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return w0; \
}

ATOMIC_OP_SUB_RETURN(_relaxed, )
ATOMIC_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC_OP_SUB_RETURN(_release, l, "memory")
ATOMIC_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC_OP_SUB_RETURN
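/*
 * There is likewise no atomic subtract: atomic_sub() negates and uses
 * stadd, and sub_return negates, ldadds and then reconstructs old - i
 * from the returned old value.  The extra instruction is why this
 * variant pads the LL/SC side with __nops(2) rather than __nops(1).
 */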
#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
        register int w0 asm ("w0") = i; \
        register atomic_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC(fetch_sub##name) \
        __nops(1), \
        /* LSE atomics */ \
        " neg %w[i], %w[i]\n" \
        " ldadd" #mb " %w[i], %w[i], %[v]") \
        : [i] "+&r" (w0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return w0; \
}

ATOMIC_FETCH_OP_SUB(_relaxed, )
ATOMIC_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC_FETCH_OP_SUB(_release, l, "memory")
ATOMIC_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC
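/*
 * The atomic64_t helpers below mirror the 32-bit ones exactly: the
 * value is pinned to x0 instead of w0, the templates drop the %w
 * operand modifier so the full 64-bit registers are used, and x30
 * replaces w30 as the scratch register.
 */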
#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
        " " #asm_op " %[i], %[v]\n") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_##op##name), \
        /* LSE atomics */ \
        " " #asm_op #mb " %[i], %[i], %[v]") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
        ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
        ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
        ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS
#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(add_return##name) \
        __nops(1), \
        /* LSE atomics */ \
        " ldadd" #mb " %[i], x30, %[v]\n" \
        " add %[i], %[i], x30") \
        : [i] "+r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed, )
ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory")
ATOMIC64_OP_ADD_RETURN(_release, l, "memory")
ATOMIC64_OP_ADD_RETURN( , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(and)
        __nops(1),
        /* LSE atomics */
        " mvn %[i], %[i]\n"
        " stclr %[i], %[v]")
        : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_and##name) \
        __nops(1), \
        /* LSE atomics */ \
        " mvn %[i], %[i]\n" \
        " ldclr" #mb " %[i], %[i], %[v]") \
        : [i] "+&r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

ATOMIC64_FETCH_OP_AND(_relaxed, )
ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
ATOMIC64_FETCH_OP_AND(_release, l, "memory")
ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND
static inline void atomic64_sub(long i, atomic64_t *v)
{
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(sub)
        __nops(1),
        /* LSE atomics */
        " neg %[i], %[i]\n"
        " stadd %[i], %[v]")
        : [i] "+&r" (x0), [v] "+Q" (v->counter)
        : "r" (x1)
        : __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(sub_return##name) \
        __nops(2), \
        /* LSE atomics */ \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], x30, %[v]\n" \
        " add %[i], %[i], x30") \
        : [i] "+&r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed, )
ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory")
ATOMIC64_OP_SUB_RETURN(_release, l, "memory")
ATOMIC64_OP_SUB_RETURN( , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
        register long x0 asm ("x0") = i; \
        register atomic64_t *x1 asm ("x1") = v; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_ATOMIC64(fetch_sub##name) \
        __nops(1), \
        /* LSE atomics */ \
        " neg %[i], %[i]\n" \
        " ldadd" #mb " %[i], %[i], %[v]") \
        : [i] "+&r" (x0), [v] "+Q" (v->counter) \
        : "r" (x1) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

ATOMIC64_FETCH_OP_SUB(_relaxed, )
ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
ATOMIC64_FETCH_OP_SUB( , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB
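/*
 * atomic64_dec_if_positive(): decrement v and return the new value
 * unless that would drop it below zero, in which case v is left alone
 * and the negative would-be result is returned.  The LSE path is a
 * hand-rolled compare-and-swap loop: load the counter, compute the
 * decrement, bail out to 2: if it went negative, then try to casal it
 * back in.  After the casal, the two subs leave x30 at zero only if
 * the value in memory was still the one originally loaded; otherwise
 * cbnz retries from 1:.
 */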
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        register long x0 asm ("x0") = (long)v;

        asm volatile(ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        __LL_SC_ATOMIC64(dec_if_positive)
        __nops(6),
        /* LSE atomics */
        "1: ldr x30, %[v]\n"
        " subs %[ret], x30, #1\n"
        " b.lt 2f\n"
        " casal x30, %[ret], %[v]\n"
        " sub x30, x30, #1\n"
        " sub x30, x30, %[ret]\n"
        " cbnz x30, 1b\n"
        "2:")
        : [ret] "+&r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");

        return x0;
}

#undef __LL_SC_ATOMIC64
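/*
 * __cmpxchg_case_##name: compare-and-swap of 1, 2, 4 or 8 bytes in
 * relaxed, acquire (acq_), release (rel_) and fully ordered (mb_)
 * flavours, returning the value found at *ptr.  On the LSE side, cas
 * overwrites its first register with the value loaded from memory, so
 * the expected old value is first copied into w30/x30 (clobbered by
 * the LL/SC call anyway) and the result is then moved into the return
 * register.
 */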
#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
                                                  unsigned long old, \
                                                  unsigned long new) \
{ \
        register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
        register unsigned long x1 asm ("x1") = old; \
        register unsigned long x2 asm ("x2") = new; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_CMPXCHG(name) \
        __nops(2), \
        /* LSE atomics */ \
        " mov " #w "30, %" #w "[old]\n" \
        " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
        " mov %" #w "[ret], " #w "30") \
        : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
        : [old] "r" (x1), [new] "r" (x2) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

__CMPXCHG_CASE(w, b, 1, )
__CMPXCHG_CASE(w, h, 2, )
__CMPXCHG_CASE(w, , 4, )
__CMPXCHG_CASE(x, , 8, )
__CMPXCHG_CASE(w, b, acq_1, a, "memory")
__CMPXCHG_CASE(w, h, acq_2, a, "memory")
__CMPXCHG_CASE(w, , acq_4, a, "memory")
__CMPXCHG_CASE(x, , acq_8, a, "memory")
__CMPXCHG_CASE(w, b, rel_1, l, "memory")
__CMPXCHG_CASE(w, h, rel_2, l, "memory")
__CMPXCHG_CASE(w, , rel_4, l, "memory")
__CMPXCHG_CASE(x, , rel_8, l, "memory")
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w, , mb_4, al, "memory")
__CMPXCHG_CASE(x, , mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
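/*
 * __cmpxchg_double##name: paired 128-bit compare-and-swap backing
 * cmpxchg_double().  casp returns the two words it found in memory in
 * x0/x1; the eor/eor/orr tail folds them against the expected old
 * values, so the function returns 0 on success and non-zero on
 * failure, matching the LL/SC fallback.
 */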
#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...) \
static inline long __cmpxchg_double##name(unsigned long old1, \
                                          unsigned long old2, \
                                          unsigned long new1, \
                                          unsigned long new2, \
                                          volatile void *ptr) \
{ \
        unsigned long oldval1 = old1; \
        unsigned long oldval2 = old2; \
        register unsigned long x0 asm ("x0") = old1; \
        register unsigned long x1 asm ("x1") = old2; \
        register unsigned long x2 asm ("x2") = new1; \
        register unsigned long x3 asm ("x3") = new2; \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
 \
        asm volatile(ARM64_LSE_ATOMIC_INSN( \
        /* LL/SC */ \
        __LL_SC_CMPXCHG_DBL(name) \
        __nops(3), \
        /* LSE atomics */ \
        " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
        " eor %[old1], %[old1], %[oldval1]\n" \
        " eor %[old2], %[old2], %[oldval2]\n" \
        " orr %[old1], %[old1], %[old2]") \
        : [old1] "+&r" (x0), [old2] "+&r" (x1), \
          [v] "+Q" (*(unsigned long *)ptr) \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
        : __LL_SC_CLOBBERS, ##cl); \
 \
        return x0; \
}

__CMPXCHG_DBL( , )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */