sanitizer_atomic_clang_other.h

//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {
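
// On this generic (non-x86) path, proc_yield is only a compiler barrier: the
// empty asm statement keeps the compiler from caching memory values across a
// spin loop, but no CPU pause/yield instruction is emitted and the cnt
// argument is ignored.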
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
    // 64-bit load on 32-bit platform.
    // Gross, but simple and reliable.
    // Assume that it is not in read-only memory.
    v = __sync_fetch_and_add(
        const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
  }
  return v;
}
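
// Usage sketch (illustrative; assumes the atomic structs provided by
// sanitizer_atomic.h look roughly like the example below, i.e. a `Type`
// typedef plus a `val_dont_use` field):
//
//   struct atomic_uint32_t {
//     typedef u32 Type;
//     volatile Type val_dont_use;
//   };
//
//   atomic_uint32_t flag;
//   u32 relaxed  = atomic_load(&flag, memory_order_relaxed);
//   u32 acquired = atomic_load(&flag, memory_order_acquire);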

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    // Gross, but simple and reliable.
    typename T::Type cmp = a->val_dont_use;
    typename T::Type cur;
    for (;;) {
      cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
      if (cur == cmp)
        break;
      cmp = cur;
    }
  }
}
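
// Illustrative sketch (not part of the original header): the two tricks used
// above for 64-bit values on 32-bit targets, written out with plain __sync
// builtins on a raw 64-bit integer. The names load64/store64 are hypothetical.
//
//   // Atomic 64-bit load: fetch-and-add of 0 is a read-modify-write that
//   // leaves the value unchanged but returns it atomically.
//   unsigned long long load64(volatile unsigned long long *p) {
//     return __sync_fetch_and_add(p, 0);
//   }
//
//   // Atomic 64-bit store: retry a compare-and-swap until it succeeds.
//   void store64(volatile unsigned long long *p, unsigned long long v) {
//     unsigned long long cmp = *p;
//     for (;;) {
//       unsigned long long cur = __sync_val_compare_and_swap(p, cmp, v);
//       if (cur == cmp)  // CAS succeeded; v is now stored.
//         break;
//       cmp = cur;       // Raced with another writer; retry with fresh value.
//     }
//   }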

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H