atomic_32.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

  21. /**
  22. * atomic_add - add integer to atomic variable
  23. * @i: integer value to add
  24. * @v: pointer of type atomic_t
  25. *
  26. * Atomically adds @i to @v.
  27. */
  28. static inline void atomic_add(int i, atomic_t *v)
  29. {
  30. _atomic_xchg_add(&v->counter, i);
  31. }
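/*
 * Example (illustrative sketch, not part of the original header;
 * "nr_events" is a hypothetical counter, ATOMIC_INIT comes from
 * <linux/atomic.h>):
 *
 *      static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *      atomic_add(5, &nr_events);      - nr_events is now 5
 */
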
#define ATOMIC_OPS(op) \
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        _atomic_fetch_##op((unsigned long *)&v->counter, i); \
} \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
        smp_mb(); \
        return _atomic_fetch_##op((unsigned long *)&v->counter, i); \
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

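/*
 * For reference, ATOMIC_OPS(or) above expands to the following
 * (a readability sketch of the generated code):
 *
 *      unsigned long _atomic_fetch_or(volatile unsigned long *p,
 *                                     unsigned long mask);
 *      static inline void atomic_or(int i, atomic_t *v)
 *      {
 *              _atomic_fetch_or((unsigned long *)&v->counter, i);
 *      }
 *      static inline int atomic_fetch_or(int i, atomic_t *v)
 *      {
 *              smp_mb();
 *              return _atomic_fetch_or((unsigned long *)&v->counter, i);
 *      }
 */
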
static inline int atomic_fetch_add(int i, atomic_t *v)
{
        smp_mb();
        return _atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic_xchg_add(&v->counter, i) + i;
}

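/*
 * Example (illustrative only; "next_ticket" is hypothetical): because
 * atomic_add_return() yields the post-add value, it can hand out
 * unique, ordered tickets:
 *
 *      static atomic_t next_ticket = ATOMIC_INIT(0);
 *
 *      int my_ticket = atomic_add_return(1, &next_ticket) - 1;
 */
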
/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic_xchg_add_unless(&v->counter, a, u);
}

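/*
 * Example (illustrative only; "obj->refcnt" and use_object() are
 * hypothetical): take a reference only while the object is still live,
 * i.e. never resurrect a refcount that has already dropped to zero:
 *
 *      if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *              use_object(obj);        - old value was non-zero
 */
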
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
        _atomic_xchg(&v->counter, n);
}

/* A 64-bit atomic type */
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
        /*
         * Requires an atomic op to read both 32-bit parts consistently.
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
        return _atomic64_xchg_add((long long *)&v->counter, 0);
}

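/*
 * Example (illustrative only; "bytes_seen" is hypothetical): on this
 * 32-bit architecture a plain 64-bit load is two separate 32-bit loads
 * and could observe a torn update, so a read adds 0 via the atomic
 * helper and uses the returned old value:
 *
 *      static atomic64_t bytes_seen = ATOMIC64_INIT(0);
 *
 *      long long snapshot = atomic64_read(&bytes_seen);
 */
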
/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
        _atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OPS(op) \
long long _atomic64_fetch_##op(long long *v, long long n); \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
        _atomic64_fetch_##op(&v->counter, i); \
} \
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{ \
        smp_mb(); \
        return _atomic64_fetch_##op(&v->counter, i); \
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
        smp_mb();
        return _atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
                                            long long u)
{
        smp_mb(); /* barrier for proper semantics */
        return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
        _atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v) atomic64_add(1LL, (v))
#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_dec(v) atomic64_sub(1LL, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)

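/*
 * Example (illustrative only; "kref64" and release() are hypothetical):
 * the derived helpers above compose the primitives, e.g. a 64-bit
 * reference count:
 *
 *      static atomic64_t kref64 = ATOMIC64_INIT(1);
 *
 *      atomic64_inc(&kref64);
 *      if (atomic64_dec_and_test(&kref64))
 *              release();              - last reference gone
 */
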
#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

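/*
 * Sketch (an illustrative assumption, not the actual routine): an
 * address is reduced to one of the ATOMIC_HASH_SIZE locks by a
 * shift-and-mask hash, conceptually something like:
 *
 *      int *lock = &atomic_locks[((unsigned long)v >> 2) &
 *                                (ATOMIC_HASH_SIZE - 1)];
 *
 * The real mapping is implemented by __atomic_hashed_lock() below and
 * may differ (e.g. on SMP the table points at per-cpu locks).
 */
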
/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock. Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to
 * discourage assembly coders from using this register by mistake, so
 * if it is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__

/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
        unsigned long val;
        int err;
};
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
                                            int *lock, int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
                                                    int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
                                    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
                                     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
                                            int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */