//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine accumulates a specified amount of memory in per-thread
// caches, then evicts it to a global FIFO queue. When the queue exceeds a
// specified threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;   // Total size of the quarantined chunks, including batch overhead.
  uptr count;  // Number of pointers stored in batch[].
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KB.
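
// Size arithmetic behind kSize = 1021 (illustrative, assuming no padding
// beyond natural alignment, which holds because every field is
// pointer-sized): on a 64-bit target the three header fields take
// 3 * 8 = 24 bytes and the array 1021 * 8 = 8168 bytes, so
// sizeof(QuarantineBatch) == 8192 == 1 << 13 exactly; on a 32-bit target
// it is 3 * 4 + 1021 * 4 == 4096 bytes.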

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
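//
// Example of a conforming callback (an illustrative sketch only; `MyNode`,
// `MyAllocator`, and its methods are hypothetical, not part of this header):
//   struct ExampleCallback {
//     void Recycle(MyNode *ptr) { allocator_->ReturnToFreeList(ptr); }
//     void *Allocate(uptr size) { return allocator_->Alloc(size); }
//     void Deallocate(void *ptr) { allocator_->Free(ptr); }
//     MyAllocator *allocator_;
//   };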
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // TryLock so that at most one thread runs the slow recycling pass;
    // the others simply skip it and return.
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;  // 90% of max_size_; Recycle() drains down to this level.
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    // Release recycle_mutex_ (acquired via TryLock in Drain) before the
    // slow recycling pass; the batches to recycle are already privately
    // owned by tmp, so no lock is needed for them.
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      COMPILER_CHECK(kPrefetch <= QuarantineBatch::kSize);
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        // Only prefetch within the populated part of the array: an
        // unguarded b->batch[i + kPrefetch] would read past the end of
        // batch[] for nearly full batches.
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
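
// Example flow (an illustrative sketch; the sizes and the hypothetical
// `ExampleCallback`/`MyNode` from above are not mandated by this header):
//   static Quarantine<ExampleCallback, MyNode> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(1 << 28, 1 << 20);  // 256 MB global, 1 MB per thread.
//   // On each deallocation:
//   quarantine.Put(&thread_local_cache, cb, node, node_size);
//   // On thread exit, flush the leftovers into the global queue:
//   quarantine.Drain(&thread_local_cache, cb);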

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total size in bytes of everything in this cache, including per-batch
  // overhead. May be read concurrently by other threads, hence the atomic.
  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  // Move all batches from cache *c into this cache.
  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // The non-atomic read-modify-write is fine here: size_ is only modified
  // by the owning thread (or under Quarantine's cache_mutex_); the atomic
  // merely makes concurrent reads via Size() well-defined.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch *AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H