atomic_64.h

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * from being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd4((void *)&v->counter, i) + i;
        barrier();  /* equivalent to smp_mb(); see block comment above */
        return val;
}
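
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller that depends on the full-barrier semantics documented above.
 * The helper name and release callback are invented for this example.
 */
static inline void example_refcount_put(atomic_t *refcnt,
                                        void (*release)(void))
{
        /* All prior memory operations are ordered before release(). */
        if (atomic_add_return(-1, refcnt) == 0)
                release();
}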

#define ATOMIC_OPS(op)                                                  \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        int val;                                                        \
        smp_mb();                                                       \
        val = __insn_fetch##op##4((void *)&v->counter, i);              \
        smp_mb();                                                       \
        return val;                                                     \
}                                                                       \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __insn_fetch##op##4((void *)&v->counter, i);                    \
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS
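
/*
 * For reference, ATOMIC_OPS(add) above expands to the equivalent of:
 *
 *      static inline int atomic_fetch_add(int i, atomic_t *v)
 *      {
 *              int val;
 *              smp_mb();
 *              val = __insn_fetchadd4((void *)&v->counter, i);
 *              smp_mb();
 *              return val;
 *      }
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              __insn_fetchadd4((void *)&v->counter, i);
 *      }
 */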

static inline int atomic_fetch_xor(int i, atomic_t *v)
{
        int guess, oldval = v->counter;
        smp_mb();
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, guess ^ i);
        } while (guess != oldval);
        smp_mb();
        return oldval;
}

static inline void atomic_xor(int i, atomic_t *v)
{
        int guess, oldval = v->counter;
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, guess ^ i);
        } while (guess != oldval);
}
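
/*
 * Illustrative sketch, not part of the original header: the mtspr +
 * cmpexch4 loop above is a general compare-exchange protocol (the
 * expected value goes in SPR_CMPEXCH_VALUE, the new value is the
 * operand), so it can carry any read-modify-write that lacks a
 * dedicated fetch instruction.  Hypothetical example: nand.
 */
static inline int example_atomic_fetch_nand(int i, atomic_t *v)
{
        int guess, oldval = v->counter;
        smp_mb();
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, ~(guess & i));
        } while (guess != oldval);
        smp_mb();
        return oldval;
}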

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval;
}
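
/*
 * Illustrative sketch, not part of the original header: the generic
 * atomic layer builds atomic_inc_not_zero() from __atomic_add_unless()
 * in essentially this way (helper name invented for the example).
 */
static inline int example_atomic_inc_not_zero(atomic_t *v)
{
        /* Add 1 unless the counter is 0; report whether we added. */
        return __atomic_add_unless(v, 1, 0) != 0;
}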

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)        { (i) }

#define atomic64_read(v)        READ_ONCE((v)->counter)
#define atomic64_set(v, i)      WRITE_ONCE((v)->counter, (i))

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd((void *)&v->counter, i) + i;
        barrier();  /* equivalent to smp_mb; see atomic_add_return() */
        return val;
}

#define ATOMIC64_OPS(op)                                                \
static inline long atomic64_fetch_##op(long i, atomic64_t *v)           \
{                                                                       \
        long val;                                                       \
        smp_mb();                                                       \
        val = __insn_fetch##op((void *)&v->counter, i);                 \
        smp_mb();                                                       \
        return val;                                                     \
}                                                                       \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        __insn_fetch##op((void *)&v->counter, i);                       \
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
        long guess, oldval = v->counter;
        smp_mb();
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch(&v->counter, guess ^ i);
        } while (guess != oldval);
        smp_mb();
        return oldval;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
        long guess, oldval = v->counter;
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch(&v->counter, guess ^ i);
        } while (guess != oldval);
}
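
/*
 * Unlike __atomic_add_unless() above, which returns the old value,
 * atomic64_add_unless() returns whether the add was performed; that
 * boolean contract is what atomic64_inc_not_zero() below relies on.
 */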
static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}
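
/*
 * The remaining operations are derived: subtraction is addition of
 * the negated operand, and the inc/dec/test forms are thin wrappers
 * around the primitives above.
 */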
#define atomic64_sub_return(i, v)       atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)        atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)              atomic64_add(-(i), (v))
#define atomic64_inc_return(v)          atomic64_add_return(1, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
#define atomic64_inc(v)                 atomic64_add(1, (v))
#define atomic64_dec(v)                 atomic64_sub(1, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)        (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)     (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)     (atomic64_add_return((i), (v)) < 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_64_H */