Improved ASAN allocator and quarantine stats.

Summary: Improved ASAN allocator and quarantine stats.

Reviewers: eugenis

Patch by Alex Shlyapnikov.

Subscribers: llvm-commits, kubabrecka

Differential Revision: https://reviews.llvm.org/D28333

llvm-svn: 291183
commit 04cfed9af5
parent 4aa0590e33
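As context for the diff that follows: the heap profile these statistics feed is emitted by __sanitizer_print_memory_profile(), which ASan implements in asan_memory_profile.cc. A minimal sketch of exercising it (hypothetical demo, not part of this commit; it assumes the single-argument interface declared in sanitizer/common_interface_defs.h at this revision):

    // profile_demo.cc -- hypothetical demo program, not part of this commit.
    // Build: clang++ -fsanitize=address -g profile_demo.cc
    #include <sanitizer/common_interface_defs.h>
    #include <cstdlib>

    int main() {
      // Leave some chunks live and free others, so the profile sees both
      // "allocated" and "quarantined" chunks.
      for (int i = 0; i < 100; i++) malloc(1000);        // stays allocated
      for (int i = 0; i < 100; i++) free(malloc(2000));  // lands in quarantine
      __sanitizer_print_memory_profile(90);  // report top 90% of live heap
      return 0;
    }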
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -681,6 +681,7 @@ struct Allocator {
 
   void PrintStats() {
     allocator.PrintStats();
+    quarantine.PrintStats();
   }
 
   void ForceLock() {
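With this addition, the allocator stats dump reports quarantine occupancy alongside the primary allocator's per-class stats. A hedged note on reaching this path (assuming the usual ASan wiring, where PrintInternalAllocatorStats() forwards to Allocator::PrintStats()):

    // Not part of this commit -- two plausible ways to trigger the dump:
    //   ASAN_OPTIONS=atexit=1 ./a.out       // accumulated stats at exit
    //   __asan_print_accumulated_stats();   // from <sanitizer/asan_interface.h>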
@@ -700,18 +701,21 @@ static AsanAllocator &get_allocator() {
   return instance.allocator;
 }
 
-bool AsanChunkView::IsValid() {
+bool AsanChunkView::IsValid() const {
   return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
 }
-bool AsanChunkView::IsAllocated() {
+bool AsanChunkView::IsAllocated() const {
   return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
 }
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-AllocType AsanChunkView::GetAllocType() {
+bool AsanChunkView::IsQuarantined() const {
+  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+}
+uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
+uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() const {
   return (AllocType)chunk_->alloc_type;
 }
 
@@ -722,14 +726,14 @@ static StackTrace GetStackTraceFromId(u32 id) {
   return res;
 }
 
-u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
-u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
+u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
 
-StackTrace AsanChunkView::GetAllocStack() {
+StackTrace AsanChunkView::GetAllocStack() const {
   return GetStackTraceFromId(GetAllocStackId());
 }
 
-StackTrace AsanChunkView::GetFreeStack() {
+StackTrace AsanChunkView::GetFreeStack() const {
   return GetStackTraceFromId(GetFreeStackId());
 }
 
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -51,28 +51,29 @@ void GetAllocatorOptions(AllocatorOptions *options);
 class AsanChunkView {
  public:
   explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
-  bool IsValid();        // Checks if AsanChunkView points to a valid allocated
-                         // or quarantined chunk.
-  bool IsAllocated();    // Checks if the memory is currently allocated.
-  uptr Beg();            // First byte of user memory.
-  uptr End();            // Last byte of user memory.
-  uptr UsedSize();       // Size requested by the user.
-  uptr AllocTid();
-  uptr FreeTid();
+  bool IsValid() const;        // Checks if AsanChunkView points to a valid
+                               // allocated or quarantined chunk.
+  bool IsAllocated() const;    // Checks if the memory is currently allocated.
+  bool IsQuarantined() const;  // Checks if the memory is currently quarantined.
+  uptr Beg() const;            // First byte of user memory.
+  uptr End() const;            // Last byte of user memory.
+  uptr UsedSize() const;       // Size requested by the user.
+  uptr AllocTid() const;
+  uptr FreeTid() const;
   bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
-  u32 GetAllocStackId();
-  u32 GetFreeStackId();
-  StackTrace GetAllocStack();
-  StackTrace GetFreeStack();
-  AllocType GetAllocType();
-  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
+  u32 GetAllocStackId() const;
+  u32 GetFreeStackId() const;
+  StackTrace GetAllocStack() const;
+  StackTrace GetFreeStack() const;
+  AllocType GetAllocType() const;
+  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
     if (addr >= Beg() && (addr + access_size) <= End()) {
       *offset = addr - Beg();
       return true;
     }
     return false;
   }
-  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
+  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
     (void)access_size;
     if (addr < Beg()) {
       *offset = Beg() - addr;
@@ -80,7 +81,7 @@ class AsanChunkView {
     }
     return false;
   }
-  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
+  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
     if (addr + access_size > End()) {
       *offset = addr - End();
       return true;
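The const-qualification above is what lets a chunk view be inspected through a const reference, which the reworked memory profiler below depends on. A small illustration (hypothetical fragment, not part of the commit; it assumes the AsanChunkView declaration from this header):

    // With the accessors const-qualified, read-only code can take the view by
    // const reference, exactly as HeapProfile::ProcessChunk() below does.
    static uptr LiveBytes(const AsanChunkView &cv) {
      return cv.IsAllocated() ? cv.UsedSize() : 0;  // requires const accessors
    }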
--- a/lib/asan/asan_memory_profile.cc
+++ b/lib/asan/asan_memory_profile.cc
@@ -32,9 +32,56 @@ struct AllocationSite {
 class HeapProfile {
  public:
   HeapProfile() : allocations_(1024) {}
-  void Insert(u32 id, uptr size) {
-    total_allocated_ += size;
-    total_count_++;
+
+  void ProcessChunk(const AsanChunkView& cv) {
+    if (cv.IsAllocated()) {
+      total_allocated_user_size_ += cv.UsedSize();
+      total_allocated_count_++;
+      u32 id = cv.GetAllocStackId();
+      if (id)
+        Insert(id, cv.UsedSize());
+    } else if (cv.IsQuarantined()) {
+      total_quarantined_user_size_ += cv.UsedSize();
+      total_quarantined_count_++;
+    } else {
+      total_other_count_++;
+    }
+  }
+
+  void Print(uptr top_percent) {
+    InternalSort(&allocations_, allocations_.size(),
+                 [](const AllocationSite &a, const AllocationSite &b) {
+                   return a.total_size > b.total_size;
+                 });
+    CHECK(total_allocated_user_size_);
+    uptr total_shown = 0;
+    Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
+           "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
+           "showing top %zd%%\n",
+           total_allocated_user_size_, total_allocated_count_,
+           total_quarantined_user_size_, total_quarantined_count_,
+           total_other_count_, total_allocated_count_ +
+           total_quarantined_count_ + total_other_count_, top_percent);
+    for (uptr i = 0; i < allocations_.size(); i++) {
+      auto &a = allocations_[i];
+      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
+             a.total_size * 100 / total_allocated_user_size_, a.count);
+      StackDepotGet(a.id).Print();
+      total_shown += a.total_size;
+      if (total_shown * 100 / total_allocated_user_size_ > top_percent)
+        break;
+    }
+  }
+
+ private:
+  uptr total_allocated_user_size_ = 0;
+  uptr total_allocated_count_ = 0;
+  uptr total_quarantined_user_size_ = 0;
+  uptr total_quarantined_count_ = 0;
+  uptr total_other_count_ = 0;
+  InternalMmapVector<AllocationSite> allocations_;
+
+  void Insert(u32 id, uptr size) {
     // Linear lookup will be good enough for most cases (although not all).
     for (uptr i = 0; i < allocations_.size(); i++) {
       if (allocations_[i].id == id) {
@@ -45,40 +92,11 @@ class HeapProfile {
     }
     allocations_.push_back({id, size, 1});
   }
-
-  void Print(uptr top_percent) {
-    InternalSort(&allocations_, allocations_.size(),
-                 [](const AllocationSite &a, const AllocationSite &b) {
-                   return a.total_size > b.total_size;
-                 });
-    CHECK(total_allocated_);
-    uptr total_shown = 0;
-    Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
-           "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
-    for (uptr i = 0; i < allocations_.size(); i++) {
-      auto &a = allocations_[i];
-      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
-             a.total_size * 100 / total_allocated_, a.count);
-      StackDepotGet(a.id).Print();
-      total_shown += a.total_size;
-      if (total_shown * 100 / total_allocated_ > top_percent)
-        break;
-    }
-  }
-
- private:
-  uptr total_allocated_ = 0;
-  uptr total_count_ = 0;
-  InternalMmapVector<AllocationSite> allocations_;
 };
 
 static void ChunkCallback(uptr chunk, void *arg) {
-  HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
-  AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
-  if (!cv.IsAllocated()) return;
-  u32 id = cv.GetAllocStackId();
-  if (!id) return;
-  hp->Insert(id, cv.UsedSize());
+  reinterpret_cast<HeapProfile*>(arg)->ProcessChunk(
+      FindHeapChunkByAllocBeg(chunk));
 }
 
 static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
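For reference, output shaped by the new Printf calls might look like this (illustrative numbers only, derived from the format strings above; assume 100 live chunks of 1024 bytes and 100 quarantined chunks of 2048 bytes):

    Live Heap Allocations: 102400 bytes in 100 chunks; quarantined: 204800 bytes in 100 chunks; 0 other chunks; total chunks: 200; showing top 90%
    102400 byte(s) (100%) in 100 allocation(s)
        <allocation stack printed by StackDepotGet(a.id).Print()>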
--- a/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -227,9 +227,8 @@ class SizeClassAllocator64 {
     uptr in_use = region->n_allocated - region->n_freed;
     uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
     Printf(
-        " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
-        "num_freed_chunks %zd"
-        " avail: %zd rss: %zdK releases: %zd\n",
+        " %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd\n",
         class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
         region->n_allocated, region->n_freed, in_use,
         region->num_freed_chunks, avail_chunks, rss >> 10,
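The change here is purely cosmetic: fixed field widths (%6zd, %7zd) keep the per-size-class columns aligned across rows. An illustrative line in the new format (made-up counts):

     13 (   224): mapped:    256K allocs:    1200 frees:     800 inuse:    400 num_freed_chunks     800 avail:   1170 rss:    128K releases:      0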
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -73,6 +73,11 @@ class Quarantine {
     Recycle(cb);
   }
 
+  void PrintStats() const {
+    // It assumes that the world is stopped, just as the allocator's PrintStats.
+    cache_.PrintStats();
+  }
+
  private:
   // Read-only data.
   char pad0_[kCacheLineSize];
@@ -163,8 +168,25 @@ class QuarantineCache {
     return b;
   }
 
+  void PrintStats() const {
+    uptr batch_count = 0;
+    uptr total_quarantine_bytes = 0;
+    uptr total_quarantine_chunks = 0;
+    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
+      batch_count++;
+      total_quarantine_bytes += (*it).size;
+      total_quarantine_chunks += (*it).count;
+    }
+    Printf("Global quarantine stats: batches: %zd; bytes: %zd; chunks: %zd "
+           "(capacity: %zd chunks)\n",
+           batch_count, total_quarantine_bytes, total_quarantine_chunks,
+           batch_count * QuarantineBatch::kSize);
+  }
+
  private:
-  IntrusiveList<QuarantineBatch> list_;
+  typedef IntrusiveList<QuarantineBatch> List;
+
+  List list_;
   atomic_uintptr_t size_;
 
   void SizeAdd(uptr add) {
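A note on the reported capacity: each QuarantineBatch holds up to QuarantineBatch::kSize chunk pointers, so batch_count * QuarantineBatch::kSize is the maximum number of chunks the currently allocated batches could hold, not the number actually quarantined. Illustrative arithmetic (assuming kSize == 1021, the batch size this header uses at this revision): 8 batches yield a reported capacity of 8 * 1021 = 8168 chunks.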