[lsan] Move out suppression of invalid PCs from StopTheWorld

This removes the last use of StackDepot from StopTheWorld.

Depends on D115284.

Reviewed By: morehouse

Differential Revision: https://reviews.llvm.org/D115319
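For context, here is a minimal sketch of the hazard this refactoring removes. It is not code from the LLVM tree, and DuringStopTheWorld/AfterThreadsResumed are hypothetical names used only for illustration: under StopTheWorld every other thread is suspended, so a StackDepot query from the tracer thread can block on an internal depot lock that a frozen thread still holds. Resolving stack ids after threads resume avoids that.

#include "sanitizer_common/sanitizer_stackdepot.h"

using namespace __sanitizer;

// Hypothetical illustration only; these functions do not exist in the tree.
void DuringStopTheWorld(u32 stack_trace_id) {
  // Before this change, per-chunk callbacks resolved stack ids here, while
  // all other threads were frozen. A suspended thread holding a StackDepot
  // lock could make this call block indefinitely.
  // StackTrace stack = StackDepotGet(stack_trace_id);  // unsafe here
}

void AfterThreadsResumed(u32 stack_trace_id) {
  // After this change, LeakSuppressionContext::Suppress() resolves the stack
  // and applies the invalid-PC check once the world is running again.
  StackTrace stack = StackDepotGet(stack_trace_id);  // safe: no thread frozen
  (void)stack;
}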
parent 3c6c30677e
commit f86deb18ca
@@ -71,9 +71,11 @@ class LeakSuppressionContext {
   SuppressionContext context;
   bool suppressed_stacks_sorted = true;
   InternalMmapVector<u32> suppressed_stacks;
+  const LoadedModule *suppress_module = nullptr;
 
-  Suppression *GetSuppressionForAddr(uptr addr);
   void LazyInit();
+  Suppression *GetSuppressionForAddr(uptr addr);
+  bool SuppressInvalid(const StackTrace &stack);
+  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
 
  public:
@@ -124,6 +126,8 @@ void LeakSuppressionContext::LazyInit() {
     if (&__lsan_default_suppressions)
       context.Parse(__lsan_default_suppressions());
     context.Parse(kStdSuppressions);
+    if (flags()->use_tls && flags()->use_ld_allocations)
+      suppress_module = GetLinker();
   }
 }
 
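The two lines added to LazyInit() only gate linker-based suppression behind the existing use_tls and use_ld_allocations flags; no new flag is introduced. As a usage note, the behavior can still be disabled at runtime:

  LSAN_OPTIONS=use_ld_allocations=0 ./a.out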
@@ -148,6 +152,41 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
   return s;
 }
 
+static uptr GetCallerPC(const StackTrace &stack) {
+  // The top frame is our malloc/calloc/etc. The next frame is the caller.
+  if (stack.size >= 2)
+    return stack.trace[1];
+  return 0;
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+  uptr caller_pc = GetCallerPC(stack);
+  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+  // it as reachable, as we can't properly report its allocation stack anyway.
+  return !caller_pc ||
+         (suppress_module && suppress_module->containsAddress(caller_pc));
+}
+
 bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                             uptr hit_count, uptr total_size) {
   for (uptr i = 0; i < stack.size; i++) {
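The ld-linux.so comment above is easiest to see with a small repro. The following is a hypothetical example, not part of the patch (libtls.so and tls_counter are made-up names): a dlopen()ed library that defines a TLS variable makes glibc allocate the module's TLS block via __libc_memalign() from inside ld-linux.so, and SuppressInvalid() above is what keeps LSan from reporting that block as a leak.

// Hypothetical repro, not part of this patch. Build the library with e.g.
//   clang -shared -fPIC -o libtls.so tls.c
// where tls.c contains:  __thread int tls_counter = 42;
#include <dlfcn.h>
#include <stdio.h>

int main() {
  void *h = dlopen("./libtls.so", RTLD_NOW);  // triggers allocate_and_init()
  if (!h) {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  // The DTV entry for libtls.so now points at a heap block allocated from
  // within ld-linux.so; with use_tls and use_ld_allocations enabled,
  // SuppressInvalid() suppresses it instead of reporting a leak.
  return 0;
}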
@@ -166,7 +205,7 @@ bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                       uptr total_size) {
   LazyInit();
   StackTrace stack = StackDepotGet(stack_trace_id);
-  if (!SuppressByRule(stack, hit_count, total_size))
+  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
     return false;
   suppressed_stacks_sorted = false;
   suppressed_stacks.push_back(stack_trace_id);
@@ -530,68 +569,6 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
   }
 }
 
-static uptr GetCallerPC(const StackTrace &stack) {
-  // The top frame is our malloc/calloc/etc. The next frame is the caller.
-  if (stack.size >= 2)
-    return stack.trace[1];
-  return 0;
-}
-
-struct InvalidPCParam {
-  Frontier *frontier;
-  bool skip_linker_allocations;
-};
-
-// ForEachChunk callback. If the caller pc is invalid or is within the linker,
-// mark as reachable. Called by ProcessPlatformSpecificAllocations.
-static void MarkInvalidPCCb(uptr chunk, void *arg) {
-  CHECK(arg);
-  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
-  chunk = GetUserBegin(chunk);
-  LsanMetadata m(chunk);
-  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
-    u32 stack_id = m.stack_trace_id();
-    uptr caller_pc = 0;
-    if (stack_id > 0)
-      caller_pc = GetCallerPC(StackDepotGet(stack_id));
-    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
-    // it as reachable, as we can't properly report its allocation stack anyway.
-    if (caller_pc == 0 || (param->skip_linker_allocations &&
-                           GetLinker()->containsAddress(caller_pc))) {
-      m.set_tag(kIgnored);
-      param->frontier->push_back(chunk);
-    }
-  }
-}
-
-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
-// modules accounting etc.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-// On all other platforms, this simply checks to ensure that the caller pc is
-// valid before reporting chunks as leaked.
-static void ProcessPC(Frontier *frontier) {
-  InvalidPCParam arg;
-  arg.frontier = frontier;
-  arg.skip_linker_allocations =
-      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
-  ForEachChunk(MarkInvalidPCCb, &arg);
-}
-
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                               Frontier *frontier) {
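The deleted MarkInvalidPCCb followed the allocator's ForEachChunk visitor pattern, which lsan continues to use elsewhere. A minimal sketch of that pattern, assuming the sanitizer-internal callback shape (ChunkCounter and CountChunksCb are illustrative names, not from the tree):

struct ChunkCounter {
  uptr allocated = 0;
};

static void CountChunksCb(uptr chunk, void *arg) {
  ChunkCounter *counter = reinterpret_cast<ChunkCounter *>(arg);
  chunk = GetUserBegin(chunk);  // translate metadata handle to user pointer
  LsanMetadata m(chunk);
  if (m.allocated())
    counter->allocated++;
}

// Usage: visits every chunk the allocator knows about.
//   ChunkCounter counter;
//   ForEachChunk(CountChunksCb, &counter);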
@@ -607,9 +584,6 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
   ProcessRootRegions(frontier);
   FloodFillTag(frontier, kReachable);
 
-  CHECK_EQ(0, frontier->size());
-  ProcessPC(frontier);
-
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
   // otherwise.