/*
 * Generic barrier definitions, originally based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif

#ifndef read_barrier_depends
#define read_barrier_depends()	do { } while (0)
#endif
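
/*
 * Illustrative sketch (not part of this header): the classic use of the
 * dma_*mb() barriers when sharing coherent DMA memory with a device.
 * The 'desc' structure and its fields are hypothetical.
 *
 *	// CPU fills in a descriptor, then publishes it to the device.
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	dma_wmb();			// order data before ownership flag
 *	WRITE_ONCE(desc->owned_by_device, 1);
 *
 *	// CPU sees the device has released a descriptor, then reads it.
 *	if (!READ_ONCE(desc->owned_by_device)) {
 *		dma_rmb();		// order flag before data
 *		len = desc->len;
 *	}
 */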
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends()	read_barrier_depends()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	__smp_read_barrier_depends()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	do { } while (0)
#endif

#endif	/* CONFIG_SMP */
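
/*
 * Illustrative sketch: the store-buffering pattern that paired smp_mb()
 * forbids.  X, Y, r0 and r1 are hypothetical, with X == Y == 0 initially.
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(Y);		r1 = READ_ONCE(X);
 *
 * With both barriers, the outcome r0 == 0 && r1 == 0 is forbidden; remove
 * either smp_mb() and it becomes possible on real hardware.  On
 * !CONFIG_SMP kernels there is no other CPU to race with, which is why
 * the fallbacks above degrade to a compiler barrier().
 */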
#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
#endif

#endif	/* CONFIG_SMP */
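
/*
 * Illustrative sketch: message passing with the release/acquire pair
 * defined above.  'data' and 'ready' are hypothetical, both initially 0.
 *
 *	// producer			// consumer
 *	data = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					r = data;	// guaranteed to be 42
 *
 * The release store orders the write of 'data' before the write of
 * 'ready'; the acquire load orders the read of 'ready' before the
 * consumer's read of 'data'.
 */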
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
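
/*
 * Illustrative sketch: a guest publishing an entry to a ring shared with
 * the host, virtio-style.  virt_wmb() is used rather than smp_wmb()
 * because the host is SMP even when the guest kernel is built
 * !CONFIG_SMP, so a compiler-only barrier would not be enough.  'ring',
 * 'desc' and 'avail_idx' are hypothetical.
 *
 *	ring[idx & mask] = desc;
 *	virt_wmb();			// entry before index update
 *	WRITE_ONCE(*avail_idx, idx + 1);
 */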
/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
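
/*
 * Illustrative sketch: upgrading a control dependency to ACQUIRE.  The
 * conditional branch already orders the load of 'locked' against later
 * stores; smp_acquire__after_ctrl_dep() supplies the missing LOAD->LOAD
 * ordering.  'lock' is hypothetical.
 *
 *	while (READ_ONCE(lock->locked))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	// from here on, no load or store can be reordered before the
 *	// READ_ONCE() that observed locked == 0
 */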
/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	typeof(*ptr) VAL;					\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	smp_acquire__after_ctrl_dep();				\
	VAL;							\
})
#endif
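
/*
 * Illustrative sketch: wait for a hypothetical 'flag' to become non-zero
 * with ACQUIRE semantics.  VAL is the macro-provided name for the value
 * just loaded from *ptr inside the condition expression.
 *
 *	val = smp_cond_load_acquire(&flag, VAL != 0);
 */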

#endif	/* !__ASSEMBLY__ */
#endif	/* __ASM_GENERIC_BARRIER_H */