//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count;  // hash_bits : 12; use_count : 20;
  uptr size;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
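
  // With kUseCountBits == 20 the constants above work out to:
  //   kMaxUseCount  == 1 << 20  == 0x00100000
  //   kUseCountMask ==             0x000FFFFF  (low 20 bits: use counter)
  //   kHashMask     ==             0xFFF00000  (high 12 bits: stack hash)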

  typedef StackTrace args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size) return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
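
  // storage_size() above sizes the node for its trailing stack[] array: the
  // struct already reserves stack[1], so only args.size - 1 extra slots are
  // added. For example, a 3-frame trace on a 64-bit target
  // (sizeof(uptr) == 8) needs sizeof(StackDepotNode) + 16 bytes.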

  static u32 hash(const args_type &args) {
    // murmur2
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
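
  // Note that only the low 32 bits of each PC feed the hash (u32 k truncates
  // a uptr), and only the bits selected by kHashMask are stored and checked
  // in eq(); a hash collision is resolved by eq()'s frame-by-frame
  // comparison, not by the hash alone.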

  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
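
// The relaxed fetch_add above only touches the low use-count bits; the CHECK
// ensures the counter stays below kMaxUseCount and never carries into the
// hash bits stored in the upper part of hash_and_use_count.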

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}
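
// A typical use of the interface above, as a sketch (pcs and n are
// placeholder names; the two-argument StackTrace constructor from
// sanitizer_stacktrace.h is assumed):
//   u32 id = StackDepotPut(StackTrace(pcs, n));  // intern the trace, keep the id
//   StackTrace trace = StackDepotGet(id);        // recover the frames later
// The depot deduplicates, so storing an identical trace again returns the
// same id; callers can keep the 32-bit id instead of a full copy of the
// trace.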

bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

StackDepotReverseMap::StackDepotReverseMap()
    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}
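
// The constructor above walks every bucket of theDepot, strips the reserved
// low bit from the bucket pointer (see the FIXME near the StackDepot
// typedef), follows each chain, and records (id, node) pairs. Sorting by id
// lets Get() below map an id back to its trace with a binary search instead
// of another walk over the depot.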

StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, 0};
  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
                                  IdDescPair::IdComparator);
  if (idx > map_.size())
    return StackTrace();
  return map_[idx].desc->load();
}

}  // namespace __sanitizer