#ifndef _ASM_POWERPC_CMPXCHG_H_
#define _ASM_POWERPC_CMPXCHG_H_

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <linux/bug.h>

/*
 * Atomic exchange
 *
 * Changes the memory location '*p' to be val and returns
 * the previous value stored there.
 */

static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u32_relaxed(u32 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_relaxed(u64 *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2\n"
	PPC405_ERR77(0, %2)
"	stdcx.	%3,0,%2\n"
"	bne-	1b"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (val)
	: "cc");

	return prev;
}
#endif

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
	return x;
}

static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_relaxed(ptr, x);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
	return x;
}

#define xchg_local(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_local((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})

#define xchg_relaxed(ptr, x)						\
({									\
	__typeof__(*(ptr)) _x_ = (x);					\
	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
			(unsigned long)_x_, sizeof(*(ptr)));		\
})
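
/*
 * Illustrative usage (a sketch, not part of this header; 'pending' and
 * process_work() are hypothetical): both macros return the previous
 * contents of *ptr, so a caller can atomically consume a flag word.
 * xchg_relaxed() implies no memory barriers; xchg_local() is likewise
 * unordered and is intended for data that is only accessed on the
 * local CPU (e.g. per-CPU state).
 *
 *	static u32 pending;
 *
 *	if (xchg_relaxed(&pending, 0) != 0)
 *		process_work();
 */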

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_local\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

/*
 * The cmpxchg family has no ordering guarantee when the compare part
 * fails, so we can avoid superfluous barriers by implementing cmpxchg()
 * and cmpxchg_acquire() in assembly.  We don't do the same for
 * cmpxchg_release(), because that would put a barrier in the middle of
 * the ll/sc loop, which is probably a bad idea: for example, it might
 * make the conditional store more likely to fail.
 */

static __always_inline unsigned long
__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"\n2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
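
/*
 * Illustrative use of the acquire semantics (a sketch, not part of this
 * header; 'lock', holding 0 when unlocked and 1 when locked, and
 * my_trylock() are hypothetical): a trylock only needs ACQUIRE ordering
 * on the success path, which is exactly what placing PPC_ACQUIRE_BARRIER
 * after the ll/sc loop provides.  The cmpxchg_acquire() wrapper used
 * here is defined later in this file.
 *
 *	static u32 lock;
 *
 *	static inline bool my_trylock(void)
 *	{
 *		return cmpxchg_acquire(&lock, 0, 1) == 0;
 *	}
 */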

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_local\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
"2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
"	cmpd	0,%0,%3\n"
"	bne-	2f\n"
"	stdcx.	%4,0,%2\n"
"	bne-	1b\n"
	PPC_ACQUIRE_BARRIER
"\n2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
		unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
	return old;
}

static __always_inline unsigned long
__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_relaxed(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
	return old;
}

static __always_inline unsigned long
__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
		  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_acquire(ptr, old, new);
#endif
	}
	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})

#define cmpxchg_relaxed(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})

#define cmpxchg_acquire(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
			(unsigned long)_o_, (unsigned long)_n_,		\
			sizeof(*(ptr)));				\
})
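
/*
 * Illustrative usage (a sketch, not part of this header; 'counter' is a
 * hypothetical variable): the classic read-modify-write retry loop.
 * cmpxchg() returns the old value, so the update succeeded iff the
 * return value equals the value that was read.
 *
 *	static u32 counter;
 *	u32 old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */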

#ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#define cmpxchg64_relaxed(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_relaxed((ptr), (o), (n));				\
})
#define cmpxchg64_acquire(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_acquire((ptr), (o), (n));				\
})
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
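
/*
 * Note: on CONFIG_PPC64, the BUILD_BUG_ON() in the cmpxchg64*() wrappers
 * above rejects operands that are not 64 bits wide at compile time, e.g.
 * (with a hypothetical 'seq'):
 *
 *	u64 seq;
 *
 *	cmpxchg64(&seq, 0, 1);		OK, sizeof(*(&seq)) == 8
 *	cmpxchg64((u32 *)&seq, 0, 1);	fails to build
 */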

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CMPXCHG_H_ */