//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags(bool standalone) {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_root_regions = true;
  f->use_unaligned = false;
  f->use_poisoned = false;
  f->log_pointers = false;
  f->log_threads = false;

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers", "");
    ParseFlag(options, &f->use_globals, "use_globals", "");
    ParseFlag(options, &f->use_stacks, "use_stacks", "");
    ParseFlag(options, &f->use_tls, "use_tls", "");
    ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
    ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
    ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
    ParseFlag(options, &f->report_objects, "report_objects", "");
    ParseFlag(options, &f->resolution, "resolution", "");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks", "");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers", "");
    ParseFlag(options, &f->log_threads, "log_threads", "");
    ParseFlag(options, &f->exitcode, "exitcode", "");
  }

  // Set defaults for common flags (only in standalone mode) and parse
  // them from LSAN_OPTIONS.
  CommonFlags *cf = common_flags();
  if (standalone) {
    SetCommonFlagsDefaults(cf);
    cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
    cf->malloc_context_size = 30;
    cf->detect_leaks = true;
  }
  ParseCommonFlagsFromString(cf, options);
}
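
// Usage sketch (illustrative; the binary name below is a placeholder): the
// flags above are normally set at process startup via the LSAN_OPTIONS
// environment variable, with entries separated by colons or spaces, e.g.
//   LSAN_OPTIONS="log_pointers=1:report_objects=1:max_leaks=3" ./a.out
// Each entry is consumed by the matching ParseFlag() call in
// InitializeFlags() above.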

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

static bool suppressions_inited = false;

void InitializeSuppressions() {
  CHECK(!suppressions_inited);
  SuppressionContext::InitIfNecessary();
  if (&__lsan_default_suppressions)
    SuppressionContext::Get()->Parse(__lsan_default_suppressions());
  suppressions_inited = true;
}
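
// Usage sketch (illustrative; both names below are placeholders): suppressions
// may also come from the file named by the common "suppressions" flag. For leak
// reports the pattern kind is "leak", matched against module, function or file
// names, e.g.
//   leak:libfoo.so
//   leak:ThirdPartyInit
// See GetSuppressionForAddr() below for how patterns are matched.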

struct RootRegion {
  const void *begin;
  uptr size;
};

InternalMmapVector<RootRegion> *root_regions;

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan(bool standalone) {
  InitializeFlags(standalone);
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
  const char *End() { return Default(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }
    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
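
// Both usage modes appear later in this file. Thread stacks feed newly
// discovered chunks into the flood-fill worklist,
//   ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
//                        kReachable);
// whereas marking indirect leaks passes a null frontier, since no further
// propagation is wanted:
//   ScanRangeForPointers(chunk, chunk + m.requested_size(),
//                        /* frontier */ 0, "HEAP", kIndirectlyLeaked);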

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
  }
}
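
// Drains |frontier| as a worklist: each chunk popped from it is scanned for
// pointers into further chunks, which ScanRangeForPointers() tags with |tag|
// and pushes back onto the frontier until no new chunks are discovered.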
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  SuppressionContext::Get()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};
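
// StopTheWorld() callback. Runs while all other threads are suspended (with
// the allocator and thread registry already locked by DoLeakCheck() below):
// classifies every chunk and collects the leaked ones into param->leak_report.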
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  param->success = true;
}
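
// Top-level entry point for the leak check. Runs at most once per process:
// suspends all other threads via StopTheWorld(), classifies chunks, applies
// suppressions, reports the remaining leaks and, if any are found and a
// non-zero exitcode is configured, terminates the process.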
void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();
  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }

  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.End());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode) {
      if (common_flags()->coverage)
        __sanitizer_cov_dump();
      internal__exit(flags()->exitcode);
    }
  }
}
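
// Returns a suppression matching |addr|, or null. Matching is attempted first
// against the name of the module containing the address, then against the
// function and file names of up to kMaxAddrFrames symbolized frames.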
static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s;

  // Suppress by module name.
  const char *module_name;
  uptr module_offset;
  if (Symbolizer::GetOrInit()
          ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
      SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
      addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    if (SuppressionContext::Get()->Match(addr_frames[i].function,
                                         SuppressionLeak, &s) ||
        SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
                                         &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}
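
// Sort order for leak reports: directly leaked chunks come before indirectly
// leaked ones; within each group, larger total size comes first.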
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);
  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  InternalSort(&leaks_, leaks_.size(), LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.End());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), summary.size(),
                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
                    allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}
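
// Illustrative use of __lsan_ignore_object() from user code (assumed caller
// snippet, not part of this file):
//   void *p = malloc(16);     // intentionally never freed
//   __lsan_ignore_object(p);  // the object will not be reported as a leak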

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}
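
// Illustrative use (assumed caller snippet; pool_start and pool_size are
// placeholders): treat a custom memory pool as a root so that pointers stored
// in it keep heap objects reachable for the pool's lifetime.
//   __lsan_register_root_region(pool_start, pool_size);
//   ...
//   __lsan_unregister_root_region(pool_start, pool_size);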

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter && common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}
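
// __lsan_disable()/__lsan_enable() nest via the per-thread disable_counter and
// must be balanced; allocations made while the counter is positive are treated
// as ignored by the leak check. Assumed caller snippet (the function name is a
// placeholder):
//   __lsan_disable();
//   leaky_third_party_init();  // leaks from here are expected and suppressed
//   __lsan_enable();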

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"