atomics.nim 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341
#
#
#            Nim's Runtime Library
#        (c) Copyright 2015 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

# Atomic operations for Nim.
  10. {.push stackTrace:off, profiler:off.}
  11. const someGcc = defined(gcc) or defined(llvm_gcc) or defined(clang)
  12. const someVcc = defined(vcc) or defined(clang_cl)
  13. type
  14. AtomType* = SomeNumber|pointer|ptr|char|bool
  15. ## Type Class representing valid types for use with atomic procs
  16. when someGcc and hasThreadSupport:
  17. type AtomMemModel* = distinct cint
  18. var ATOMIC_RELAXED* {.importc: "__ATOMIC_RELAXED", nodecl.}: AtomMemModel
  19. ## No barriers or synchronization.
  20. var ATOMIC_CONSUME* {.importc: "__ATOMIC_CONSUME", nodecl.}: AtomMemModel
  21. ## Data dependency only for both barrier and
  22. ## synchronization with another thread.
  23. var ATOMIC_ACQUIRE* {.importc: "__ATOMIC_ACQUIRE", nodecl.}: AtomMemModel
  24. ## Barrier to hoisting of code and synchronizes with
  25. ## release (or stronger)
  26. ## semantic stores from another thread.
  27. var ATOMIC_RELEASE* {.importc: "__ATOMIC_RELEASE", nodecl.}: AtomMemModel
  28. ## Barrier to sinking of code and synchronizes with
  29. ## acquire (or stronger)
  30. ## semantic loads from another thread.
  31. var ATOMIC_ACQ_REL* {.importc: "__ATOMIC_ACQ_REL", nodecl.}: AtomMemModel
  32. ## Full barrier in both directions and synchronizes
  33. ## with acquire loads
  34. ## and release stores in another thread.
  35. var ATOMIC_SEQ_CST* {.importc: "__ATOMIC_SEQ_CST", nodecl.}: AtomMemModel
  36. ## Full barrier in both directions and synchronizes
  37. ## with acquire loads
  38. ## and release stores in all threads.
  39. proc atomicLoadN*[T: AtomType](p: ptr T, mem: AtomMemModel): T {.
  40. importc: "__atomic_load_n", nodecl.}
  41. ## This proc implements an atomic load operation. It returns the contents at p.
  42. ## ATOMIC_RELAXED, ATOMIC_SEQ_CST, ATOMIC_ACQUIRE, ATOMIC_CONSUME.
  43. proc atomicLoad*[T: AtomType](p, ret: ptr T, mem: AtomMemModel) {.
  44. importc: "__atomic_load", nodecl.}
  45. ## This is the generic version of an atomic load. It returns the contents at p in ret.
  46. proc atomicStoreN*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel) {.
  47. importc: "__atomic_store_n", nodecl.}
  48. ## This proc implements an atomic store operation. It writes val at p.
  49. ## ATOMIC_RELAXED, ATOMIC_SEQ_CST, and ATOMIC_RELEASE.
  50. proc atomicStore*[T: AtomType](p, val: ptr T, mem: AtomMemModel) {.
  51. importc: "__atomic_store", nodecl.}
  52. ## This is the generic version of an atomic store. It stores the value of val at p
  53. proc atomicExchangeN*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  54. importc: "__atomic_exchange_n", nodecl.}
  55. ## This proc implements an atomic exchange operation. It writes val at p,
  56. ## and returns the previous contents at p.
  57. ## ATOMIC_RELAXED, ATOMIC_SEQ_CST, ATOMIC_ACQUIRE, ATOMIC_RELEASE, ATOMIC_ACQ_REL
  58. proc atomicExchange*[T: AtomType](p, val, ret: ptr T, mem: AtomMemModel) {.
  59. importc: "__atomic_exchange", nodecl.}
  60. ## This is the generic version of an atomic exchange. It stores the contents at val at p.
  61. ## The original value at p is copied into ret.
  62. proc atomicCompareExchangeN*[T: AtomType](p, expected: ptr T, desired: T,
  63. weak: bool, success_memmodel: AtomMemModel, failure_memmodel: AtomMemModel): bool {.
  64. importc: "__atomic_compare_exchange_n", nodecl.}
  65. ## This proc implements an atomic compare and exchange operation. This compares the
  66. ## contents at p with the contents at expected and if equal, writes desired at p.
  67. ## If they are not equal, the current contents at p is written into expected.
  68. ## Weak is true for weak compare_exchange, and false for the strong variation.
  69. ## Many targets only offer the strong variation and ignore the parameter.
  70. ## When in doubt, use the strong variation.
  71. ## True is returned if desired is written at p and the execution is considered
  72. ## to conform to the memory model specified by success_memmodel. There are no
  73. ## restrictions on what memory model can be used here. False is returned otherwise,
  74. ## and the execution is considered to conform to failure_memmodel. This memory model
  75. ## cannot be __ATOMIC_RELEASE nor __ATOMIC_ACQ_REL. It also cannot be a stronger model
  76. ## than that specified by success_memmodel.
  77. proc atomicCompareExchange*[T: AtomType](p, expected, desired: ptr T,
  78. weak: bool, success_memmodel: AtomMemModel, failure_memmodel: AtomMemModel): bool {.
  79. importc: "__atomic_compare_exchange", nodecl.}
  80. ## This proc implements the generic version of atomic_compare_exchange.
  81. ## The proc is virtually identical to atomic_compare_exchange_n, except the desired
  82. ## value is also a pointer.
  83. ## Perform the operation return the new value, all memory models are valid
  84. proc atomicAddFetch*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  85. importc: "__atomic_add_fetch", nodecl.}
  86. proc atomicSubFetch*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  87. importc: "__atomic_sub_fetch", nodecl.}
  88. proc atomicOrFetch*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  89. importc: "__atomic_or_fetch", nodecl.}
  90. proc atomicAndFetch*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  91. importc: "__atomic_and_fetch", nodecl.}
  92. proc atomicXorFetch*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  93. importc: "__atomic_xor_fetch", nodecl.}
  94. proc atomicNandFetch*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  95. importc: "__atomic_nand_fetch", nodecl.}
  96. ## Perform the operation return the old value, all memory models are valid
  97. proc atomicFetchAdd*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  98. importc: "__atomic_fetch_add", nodecl.}
  99. proc atomicFetchSub*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  100. importc: "__atomic_fetch_sub", nodecl.}
  101. proc atomicFetchOr*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  102. importc: "__atomic_fetch_or", nodecl.}
  103. proc atomicFetchAnd*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  104. importc: "__atomic_fetch_and", nodecl.}
  105. proc atomicFetchXor*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  106. importc: "__atomic_fetch_xor", nodecl.}
  107. proc atomicFetchNand*[T: AtomType](p: ptr T, val: T, mem: AtomMemModel): T {.
  108. importc: "__atomic_fetch_nand", nodecl.}
  109. proc atomicTestAndSet*(p: pointer, mem: AtomMemModel): bool {.
  110. importc: "__atomic_test_and_set", nodecl.}
  111. ## This built-in function performs an atomic test-and-set operation on the byte at p.
  112. ## The byte is set to some implementation defined nonzero "set" value and the return
  113. ## value is true if and only if the previous contents were "set".
  114. ## All memory models are valid.
  115. proc atomicClear*(p: pointer, mem: AtomMemModel) {.
  116. importc: "__atomic_clear", nodecl.}
  117. ## This built-in function performs an atomic clear operation at p.
  118. ## After the operation, at p contains 0.
  119. ## ATOMIC_RELAXED, ATOMIC_SEQ_CST, ATOMIC_RELEASE
  120. proc atomicThreadFence*(mem: AtomMemModel) {.
  121. importc: "__atomic_thread_fence", nodecl.}
  122. ## This built-in function acts as a synchronization fence between threads based
  123. ## on the specified memory model. All memory orders are valid.
  124. proc atomicSignalFence*(mem: AtomMemModel) {.
  125. importc: "__atomic_signal_fence", nodecl.}
  126. ## This built-in function acts as a synchronization fence between a thread and
  127. ## signal handlers based in the same thread. All memory orders are valid.
  128. proc atomicAlwaysLockFree*(size: int, p: pointer): bool {.
  129. importc: "__atomic_always_lock_free", nodecl.}
  130. ## This built-in function returns true if objects of size bytes always generate
  131. ## lock free atomic instructions for the target architecture. size must resolve
  132. ## to a compile-time constant and the result also resolves to a compile-time constant.
  133. ## ptr is an optional pointer to the object that may be used to determine alignment.
  134. ## A value of 0 indicates typical alignment should be used. The compiler may also
  135. ## ignore this parameter.
  136. proc atomicIsLockFree*(size: int, p: pointer): bool {.
  137. importc: "__atomic_is_lock_free", nodecl.}
  138. ## This built-in function returns true if objects of size bytes always generate
  139. ## lock free atomic instructions for the target architecture. If it is not known
  140. ## to be lock free a call is made to a runtime routine named __atomic_is_lock_free.
  141. ## ptr is an optional pointer to the object that may be used to determine alignment.
  142. ## A value of 0 indicates typical alignment should be used. The compiler may also
  143. ## ignore this parameter.
  144. template fence*() = atomicThreadFence(ATOMIC_SEQ_CST)
  145. elif someVcc and hasThreadSupport:
  146. type AtomMemModel* = distinct cint
  147. const
  148. ATOMIC_RELAXED = 0.AtomMemModel
  149. ATOMIC_CONSUME = 1.AtomMemModel
  150. ATOMIC_ACQUIRE = 2.AtomMemModel
  151. ATOMIC_RELEASE = 3.AtomMemModel
  152. ATOMIC_ACQ_REL = 4.AtomMemModel
  153. ATOMIC_SEQ_CST = 5.AtomMemModel
  154. proc `==`(x1, x2: AtomMemModel): bool {.borrow.}
  155. proc readBarrier() {.importc: "_ReadBarrier", header: "<intrin.h>".}
  156. proc writeBarrier() {.importc: "_WriteBarrier", header: "<intrin.h>".}
  157. proc fence*() {.importc: "_ReadWriteBarrier", header: "<intrin.h>".}
  158. template barrier(mem: AtomMemModel) =
  159. when mem == ATOMIC_RELAXED: discard
  160. elif mem == ATOMIC_CONSUME: readBarrier()
  161. elif mem == ATOMIC_ACQUIRE: writeBarrier()
  162. elif mem == ATOMIC_RELEASE: fence()
  163. elif mem == ATOMIC_ACQ_REL: fence()
  164. elif mem == ATOMIC_SEQ_CST: fence()
  165. proc atomicLoadN*[T: AtomType](p: ptr T, mem: static[AtomMemModel]): T =
  166. result = p[]
  167. barrier(mem)
  168. when defined(cpp):
  169. when sizeof(int) == 8:
  170. proc addAndFetch*(p: ptr int, val: int): int {.
  171. importcpp: "_InterlockedExchangeAdd64(static_cast<NI volatile *>(#), #)",
  172. header: "<intrin.h>".}
  173. else:
  174. proc addAndFetch*(p: ptr int, val: int): int {.
  175. importcpp: "_InterlockedExchangeAdd(reinterpret_cast<long volatile *>(#), static_cast<long>(#))",
  176. header: "<intrin.h>".}
  177. else:
  178. when sizeof(int) == 8:
  179. proc addAndFetch*(p: ptr int, val: int): int {.
  180. importc: "_InterlockedExchangeAdd64", header: "<intrin.h>".}
  181. else:
  182. proc addAndFetch*(p: ptr int, val: int): int {.
  183. importc: "_InterlockedExchangeAdd", header: "<intrin.h>".}
  184. else:
  185. proc addAndFetch*(p: ptr int, val: int): int {.inline.} =
  186. inc(p[], val)
  187. result = p[]
  188. proc atomicInc*(memLoc: var int, x: int = 1): int =
  189. when someGcc and hasThreadSupport:
  190. result = atomicAddFetch(memLoc.addr, x, ATOMIC_SEQ_CST)
  191. elif someVcc and hasThreadSupport:
  192. result = addAndFetch(memLoc.addr, x)
  193. inc(result, x)
  194. else:
  195. inc(memLoc, x)
  196. result = memLoc
  197. proc atomicDec*(memLoc: var int, x: int = 1): int =
  198. when someGcc and hasThreadSupport:
  199. when declared(atomicSubFetch):
  200. result = atomicSubFetch(memLoc.addr, x, ATOMIC_SEQ_CST)
  201. else:
  202. result = atomicAddFetch(memLoc.addr, -x, ATOMIC_SEQ_CST)
  203. elif someVcc and hasThreadSupport:
  204. result = addAndFetch(memLoc.addr, -x)
  205. dec(result, x)
  206. else:
  207. dec(memLoc, x)
  208. result = memLoc
  209. when someVcc:
  210. when defined(cpp):
  211. proc interlockedCompareExchange64(p: pointer; exchange, comparand: int64): int64
  212. {.importcpp: "_InterlockedCompareExchange64(static_cast<NI64 volatile *>(#), #, #)", header: "<intrin.h>".}
  213. proc interlockedCompareExchange32(p: pointer; exchange, comparand: int32): int32
  214. {.importcpp: "_InterlockedCompareExchange(static_cast<NI volatile *>(#), #, #)", header: "<intrin.h>".}
  215. proc interlockedCompareExchange8(p: pointer; exchange, comparand: byte): byte
  216. {.importcpp: "_InterlockedCompareExchange8(static_cast<char volatile *>(#), #, #)", header: "<intrin.h>".}
  217. else:
  218. proc interlockedCompareExchange64(p: pointer; exchange, comparand: int64): int64
  219. {.importc: "_InterlockedCompareExchange64", header: "<intrin.h>".}
  220. proc interlockedCompareExchange32(p: pointer; exchange, comparand: int32): int32
  221. {.importc: "_InterlockedCompareExchange", header: "<intrin.h>".}
  222. proc interlockedCompareExchange8(p: pointer; exchange, comparand: byte): byte
  223. {.importc: "_InterlockedCompareExchange8", header: "<intrin.h>".}
  224. proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool =
  225. when sizeof(T) == 8:
  226. interlockedCompareExchange64(p, cast[int64](newValue), cast[int64](oldValue)) ==
  227. cast[int64](oldValue)
  228. elif sizeof(T) == 4:
  229. interlockedCompareExchange32(p, cast[int32](newValue), cast[int32](oldValue)) ==
  230. cast[int32](oldValue)
  231. elif sizeof(T) == 1:
  232. interlockedCompareExchange8(p, cast[byte](newValue), cast[byte](oldValue)) ==
  233. cast[byte](oldValue)
  234. else:
  235. {.error: "invalid CAS instruction".}
  236. elif defined(tcc):
  237. when defined(amd64):
  238. {.emit:"""
  239. static int __tcc_cas(int *ptr, int oldVal, int newVal)
  240. {
  241. unsigned char ret;
  242. __asm__ __volatile__ (
  243. " lock\n"
  244. " cmpxchgq %2,%1\n"
  245. " sete %0\n"
  246. : "=q" (ret), "=m" (*ptr)
  247. : "r" (newVal), "m" (*ptr), "a" (oldVal)
  248. : "memory");
  249. return ret;
  250. }
  251. """.}
  252. else:
  253. #assert sizeof(int) == 4
  254. {.emit:"""
  255. static int __tcc_cas(int *ptr, int oldVal, int newVal)
  256. {
  257. unsigned char ret;
  258. __asm__ __volatile__ (
  259. " lock\n"
  260. " cmpxchgl %2,%1\n"
  261. " sete %0\n"
  262. : "=q" (ret), "=m" (*ptr)
  263. : "r" (newVal), "m" (*ptr), "a" (oldVal)
  264. : "memory");
  265. return ret;
  266. }
  267. """.}
  268. proc tcc_cas(p: ptr int; oldValue, newValue: int): bool
  269. {.importc: "__tcc_cas", nodecl.}
  270. proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool =
  271. tcc_cas(cast[ptr int](p), cast[int](oldValue), cast[int](newValue))
  272. elif declared(atomicCompareExchangeN):
  273. proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool =
  274. atomicCompareExchangeN(p, oldValue.unsafeAddr, newValue, false, ATOMIC_SEQ_CST, ATOMIC_SEQ_CST)
  275. else:
  276. # this is valid for GCC and Intel C++
  277. proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool
  278. {.importc: "__sync_bool_compare_and_swap", nodecl.}
  279. # XXX is this valid for 'int'?
  280. when (defined(x86) or defined(amd64)) and someVcc:
  281. proc cpuRelax* {.importc: "YieldProcessor", header: "<windows.h>".}
  282. elif (defined(x86) or defined(amd64)) and (someGcc or defined(bcc)):
  283. proc cpuRelax* {.inline.} =
  284. {.emit: """asm volatile("pause" ::: "memory");""".}
  285. elif someGcc or defined(tcc):
  286. proc cpuRelax* {.inline.} =
  287. {.emit: """asm volatile("" ::: "memory");""".}
  288. elif defined(icl):
  289. proc cpuRelax* {.importc: "_mm_pause", header: "xmmintrin.h".}
  290. elif false:
  291. from os import sleep
  292. proc cpuRelax* {.inline.} = os.sleep(1)
  293. when not declared(fence) and hasThreadSupport:
  294. # XXX fixme
  295. proc fence*() {.inline.} =
  296. var dummy: bool
  297. discard cas(addr dummy, false, true)
  298. {.pop.}