safe_refcount.cpp

/*************************************************************************/
/*  safe_refcount.cpp                                                    */
/*************************************************************************/
/*                       This file is part of:                           */
/*                           GODOT ENGINE                                */
/*                      https://godotengine.org                          */
/*************************************************************************/
/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur.                 */
/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md).   */
/*                                                                       */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the       */
/* "Software"), to deal in the Software without restriction, including   */
/* without limitation the rights to use, copy, modify, merge, publish,   */
/* distribute, sublicense, and/or sell copies of the Software, and to    */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions:                                             */
/*                                                                       */
/* The above copyright notice and this permission notice shall be        */
/* included in all copies or substantial portions of the Software.       */
/*                                                                       */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,       */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF    */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY  */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,  */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE     */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                */
/*************************************************************************/
#include "safe_refcount.h"

// Atomic functions; these are used for multithread-safe reference counters!
// (A hypothetical usage sketch appears at the end of this file.)
#ifdef NO_THREADS

/* Bogus implementation unaware of multiprocessing */

template <class T>
static _ALWAYS_INLINE_ T _atomic_conditional_increment_impl(register T *pw) {
    if (*pw == 0)
        return 0;

    (*pw)++;
    return *pw;
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_decrement_impl(register T *pw) {
    (*pw)--;
    return *pw;
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_increment_impl(register T *pw) {
    (*pw)++;
    return *pw;
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_sub_impl(register T *pw, register T val) {
    (*pw) -= val;
    return *pw;
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_add_impl(register T *pw, register T val) {
    (*pw) += val;
    return *pw;
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_exchange_if_greater_impl(register T *pw, register T val) {
    if (val > *pw)
        *pw = val;

    return *pw;
}
#elif defined(__GNUC__)

/* Implementation for GCC & Clang */

// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
// Clang states it supports GCC atomic builtins.

template <class T>
static _ALWAYS_INLINE_ T _atomic_conditional_increment_impl(register T *pw) {
    while (true) {
        T tmp = static_cast<T const volatile &>(*pw);
        if (tmp == 0)
            return 0; // if zero, can't add to it anymore
        if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
            return tmp + 1;
    }
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_decrement_impl(register T *pw) {
    return __sync_sub_and_fetch(pw, 1);
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_increment_impl(register T *pw) {
    return __sync_add_and_fetch(pw, 1);
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_sub_impl(register T *pw, register T val) {
    return __sync_sub_and_fetch(pw, val);
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_add_impl(register T *pw, register T val) {
    return __sync_add_and_fetch(pw, val);
}

template <class T>
static _ALWAYS_INLINE_ T _atomic_exchange_if_greater_impl(register T *pw, register T val) {
    while (true) {
        T tmp = static_cast<T const volatile &>(*pw);
        if (tmp >= val)
            return tmp; // already greater, or equal
        if (__sync_val_compare_and_swap(pw, tmp, val) == tmp)
            return val;
    }
}
#elif defined(_MSC_VER)

/* Implementation for MSVC-Windows */

// don't pollute my namespace!
#include <windows.h>

#define ATOMIC_CONDITIONAL_INCREMENT_BODY(m_pw, m_win_type, m_win_cmpxchg, m_cpp_type) \
    /* try to increment until it actually works */                                     \
    /* taken from boost */                                                             \
    while (true) {                                                                     \
        m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw));            \
        if (tmp == 0)                                                                  \
            return 0; /* if zero, can't add to it anymore */                           \
        if (m_win_cmpxchg((m_win_type volatile *)(m_pw), tmp + 1, tmp) == tmp)         \
            return tmp + 1;                                                            \
    }

#define ATOMIC_EXCHANGE_IF_GREATER_BODY(m_pw, m_val, m_win_type, m_win_cmpxchg, m_cpp_type) \
    while (true) {                                                                          \
        m_cpp_type tmp = static_cast<m_cpp_type const volatile &>(*(m_pw));                 \
        if (tmp >= m_val)                                                                   \
            return tmp; /* already greater, or equal */                                     \
        if (m_win_cmpxchg((m_win_type volatile *)(m_pw), m_val, tmp) == tmp)                \
            return m_val;                                                                   \
    }

static _ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(register uint32_t *pw) {
    ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
}

static _ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(register uint32_t *pw) {
    return InterlockedDecrement((LONG volatile *)pw);
}

static _ALWAYS_INLINE_ uint32_t _atomic_increment_impl(register uint32_t *pw) {
    return InterlockedIncrement((LONG volatile *)pw);
}

static _ALWAYS_INLINE_ uint32_t _atomic_sub_impl(register uint32_t *pw, register uint32_t val) {
    // InterlockedExchangeAdd returns the previous value, so subtract val again to get the new value.
    return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
}

static _ALWAYS_INLINE_ uint32_t _atomic_add_impl(register uint32_t *pw, register uint32_t val) {
    return InterlockedAdd((LONG volatile *)pw, val);
}

static _ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(register uint32_t *pw, register uint32_t val) {
    ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t)
}

static _ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(register uint64_t *pw) {
    ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
}

static _ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(register uint64_t *pw) {
    return InterlockedDecrement64((LONGLONG volatile *)pw);
}

static _ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw) {
    return InterlockedIncrement64((LONGLONG volatile *)pw);
}

static _ALWAYS_INLINE_ uint64_t _atomic_sub_impl(register uint64_t *pw, register uint64_t val) {
    // Same trick as above: turn the returned previous value into the post-subtraction value.
    return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
}

static _ALWAYS_INLINE_ uint64_t _atomic_add_impl(register uint64_t *pw, register uint64_t val) {
    return InterlockedAdd64((LONGLONG volatile *)pw, val);
}

static _ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(register uint64_t *pw, register uint64_t val) {
    ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t)
}

#else

//no threads supported?
#error Must provide atomic functions for this platform or compiler!

#endif
// The actual advertised functions; they'll call the right implementation.

uint32_t atomic_conditional_increment(register uint32_t *counter) {
    return _atomic_conditional_increment_impl(counter);
}

uint32_t atomic_decrement(register uint32_t *pw) {
    return _atomic_decrement_impl(pw);
}

uint32_t atomic_increment(register uint32_t *pw) {
    return _atomic_increment_impl(pw);
}

uint32_t atomic_sub(register uint32_t *pw, register uint32_t val) {
    return _atomic_sub_impl(pw, val);
}

uint32_t atomic_add(register uint32_t *pw, register uint32_t val) {
    return _atomic_add_impl(pw, val);
}

uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val) {
    return _atomic_exchange_if_greater_impl(pw, val);
}

uint64_t atomic_conditional_increment(register uint64_t *counter) {
    return _atomic_conditional_increment_impl(counter);
}

uint64_t atomic_decrement(register uint64_t *pw) {
    return _atomic_decrement_impl(pw);
}

uint64_t atomic_increment(register uint64_t *pw) {
    return _atomic_increment_impl(pw);
}

uint64_t atomic_sub(register uint64_t *pw, register uint64_t val) {
    return _atomic_sub_impl(pw, val);
}

uint64_t atomic_add(register uint64_t *pw, register uint64_t val) {
    return _atomic_add_impl(pw, val);
}

uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val) {
    return _atomic_exchange_if_greater_impl(pw, val);
}
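
// Usage sketch: a minimal, hypothetical reference counter built on the
// functions above, in the spirit of how a SafeRefCount-style wrapper might
// drive them. The struct name and members below are illustrative assumptions,
// not Godot API, and the block is disabled so it does not affect compilation.
#if 0
struct ExampleRefCount {
    uint32_t count = 1; // a new object starts with one reference

    // ref(): returns false if the count had already reached zero, so a dying
    // object cannot be "resurrected" by a late increment attempt.
    bool ref() {
        return atomic_conditional_increment(&count) != 0;
    }

    // unref(): returns true when the last reference is released, meaning the
    // caller should now delete the object.
    bool unref() {
        return atomic_decrement(&count) == 0;
    }
};
#endif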