/*
 * Atomic xchg and cmpxchg operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H

#ifndef __ASSEMBLY__

#include <linux/stringify.h>

/*
 * cmpxchg
 */
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	/* Mask interrupts up to LOCKLEVEL (saving the old PS in a15),
	 * store the new value only if *p still equals the expected old
	 * value, then restore PS and resynchronize. */
	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i    %0, %1, 0              \n\t"
			     "bne     %0, %2, 1f             \n\t"
			     "s32i    %3, %1, 0              \n\t"
			     "1:                             \n\t"
			     "wsr     a15, "__stringify(PS)" \n\t"
			     "rsync                          \n\t"
			     : "=&a" (old)
			     : "a" (p), "a" (old), "r" (new)
			     : "a15", "memory");
	return old;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:  return __cmpxchg_u32(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
		 return old;
	}
}
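
/*
 * Illustration (not part of the original header): because only the
 * 4-byte case is handled above, a cmpxchg() on any other size leaves
 * an unresolved call to __cmpxchg_called_with_bad_pointer() in the
 * object file, so the build fails at link time instead of silently
 * performing a non-atomic operation, e.g.:
 *
 *	unsigned short s;
 *	cmpxchg(&s, 0, 1);	<-- undefined reference at link time
 */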

#define cmpxchg(ptr,o,n)						\
	({ __typeof__(*(ptr)) _o_ = (o);				\
	   __typeof__(*(ptr)) _n_ = (n);				\
	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				(unsigned long)_n_, sizeof (*(ptr)));	\
	})
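
/*
 * Usage sketch (not part of the original header): the canonical
 * compare-and-swap retry loop built on cmpxchg().  The function name
 * is a hypothetical example, not a kernel API.
 */
static inline void __cmpxchg_example_add(volatile int *v, int delta)
{
	int old;

	/* Reread and retry if another CPU or an interrupt handler
	 * modified *v between the load and the cmpxchg. */
	do {
		old = *v;
	} while (cmpxchg(v, old, old + delta) != old);
}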

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
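
/*
 * Usage sketch (not part of the original header): cmpxchg_local() is
 * enough for data that is only ever touched by the local CPU, such as
 * a per-CPU statistics counter.  The function name is a hypothetical
 * example, not a kernel API.
 */
static inline void __cmpxchg_local_example_inc(unsigned long *counter)
{
	unsigned long old;

	/* Only needs to be atomic against interrupts on this CPU,
	 * not against other CPUs. */
	do {
		old = *counter;
	} while (cmpxchg_local(counter, old, old + 1) != old);
}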

/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long tmp;

	/* Mask interrupts up to LOCKLEVEL (saving the old PS in a15),
	 * swap in the new value, then restore PS and resynchronize. */
	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i    %0, %1, 0              \n\t"
			     "s32i    %2, %1, 0              \n\t"
			     "wsr     a15, "__stringify(PS)" \n\t"
			     "rsync                          \n\t"
			     : "=&a" (tmp)
			     : "a" (m), "a" (val)
			     : "a15", "memory");
	return tmp;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
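
/*
 * Usage sketch (not part of the original header): xchg() as a
 * test-and-set primitive for a minimal busy-wait lock.  The function
 * names are hypothetical examples, not a real kernel API.
 */
static inline void __xchg_example_lock(volatile int *lock)
{
	/* Atomically store 1; a nonzero old value means another CPU
	 * already holds the lock, so spin until it is released. */
	while (xchg(lock, 1) != 0)
		;
}

static inline void __xchg_example_unlock(volatile int *lock)
{
	*lock = 0;
}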

#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_CMPXCHG_H */