#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}
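
/*
 * Illustrative usage sketch, not part of the original header: local_t is
 * meant for per-CPU counters that are only ever modified by their owning
 * CPU, so the ops above can skip the lock prefix.  The names page_stats,
 * count_page and read_all_cpus are made up for this example; DEFINE_PER_CPU,
 * get_cpu_var, put_cpu_var, per_cpu and for_each_online_cpu are the usual
 * kernel per-CPU helpers.
 *
 *	static DEFINE_PER_CPU(local_t, page_stats) = LOCAL_INIT(0);
 *
 *	static void count_page(void)
 *	{
 *		local_inc(&get_cpu_var(page_stats));
 *		put_cpu_var(page_stats);
 *	}
 *
 *	static long read_all_cpus(void)
 *	{
 *		long sum = 0;
 *		int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			sum += local_read(&per_cpu(page_stats, cpu));
 *		return sum;
 *	}
 */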

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_SUB "%2,%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_DEC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}
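
/*
 * Illustrative sketch, not part of the original header: the *_and_test
 * helpers fold the zero check into the same read-modify-write instruction,
 * so "decrement and check for zero" needs no separate local_read().  The
 * per-CPU trace_nesting counter and flush_trace_buffer() below are
 * hypothetical; this_cpu_ptr() is the standard per-CPU accessor.
 *
 *	static DEFINE_PER_CPU(local_t, trace_nesting);
 *
 *	static void trace_exit(void)
 *	{
 *		if (local_dec_and_test(this_cpu_ptr(&trace_nesting)))
 *			flush_trace_buffer();
 *	}
 */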

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int local_inc_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_INC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_ADD "%2,%0; sets %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i;
#ifdef CONFIG_M386
	unsigned long flags;

	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = local_read(l);
	local_set(l, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)	(local_add_return(1, l))
#define local_dec_return(l)	(local_sub_return(1, l))
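
/*
 * Illustrative sketch, not part of the original header: unlike local_add(),
 * local_add_return() (and the local_inc_return()/local_dec_return() wrappers
 * above) hands back the new value, which suits per-CPU sequence numbers.
 * The seq variable and next_seq() are made up for this example.
 *
 *	static DEFINE_PER_CPU(local_t, seq);
 *
 *	static long next_seq(void)
 *	{
 *		return local_inc_return(this_cpu_ptr(&seq));
 *	}
 */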

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read((l));					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})

#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
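
/*
 * Illustrative sketch, not part of the original header: local_add_unless()
 * is a compare-and-swap loop built on local_cmpxchg(), and
 * local_inc_not_zero() is its most common shape, e.g. taking a reference
 * only while a per-CPU buffer is still live (a count that already reached 0
 * must not be resurrected).  The buf_refs variable is hypothetical.
 *
 *	static DEFINE_PER_CPU(local_t, buf_refs);
 *
 *	static bool try_get_buf(void)
 *	{
 *		return local_inc_not_zero(this_cpu_ptr(&buf_refs));
 *	}
 */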

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))
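
/*
 * Illustrative note, not part of the original header: the __local_* forms
 * are for callers that already guarantee the update cannot be interrupted
 * mid-way (e.g. running with interrupts disabled), where a non-atomic
 * version would be acceptable; on x86 they simply alias the local_* ops
 * above, which already avoid the lock prefix.  page_stats is the
 * hypothetical per-CPU counter from the sketch near the top of this file.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	__local_add(nr_pages, this_cpu_ptr(&page_stats));
 *	local_irq_restore(flags);
 */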

#endif /* _ASM_X86_LOCAL_H */