Improved ASAN allocator and quarantine stats.

Summary: Improved ASAN allocator and quarantine stats.

Reviewers: eugenis

Patch by Alex Shlyapnikov.

Subscribers: llvm-commits, kubabrecka

Differential Revision: https://reviews.llvm.org/D28333

llvm-svn: 291183
Evgeniy Stepanov 2017-01-05 22:17:53 +00:00
parent 4aa0590e33
commit 04cfed9af5
5 changed files with 109 additions and 65 deletions


@@ -681,6 +681,7 @@ struct Allocator {
   void PrintStats() {
     allocator.PrintStats();
+    quarantine.PrintStats();
   }
 
   void ForceLock() {
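
In other words, the allocator's stats dump now also triggers the quarantine's. A minimal, self-contained sketch of that delegation pattern with toy types (illustration only, not the real ASan classes, which live in asan_allocator.cc and sanitizer_quarantine.h):

#include <cstdio>

// Toy stand-ins for illustration only.
struct ToyQuarantine {
  void PrintStats() const { std::printf("quarantine: 0 batches\n"); }
};

struct ToyAllocator {
  void PrintStats() const { std::printf("allocator: 0 chunks\n"); }
};

struct ToyAllocatorFrontend {
  ToyAllocator allocator;
  ToyQuarantine quarantine;
  void PrintStats() {
    allocator.PrintStats();
    quarantine.PrintStats();  // the extra call mirrors the hunk above
  }
};

int main() {
  ToyAllocatorFrontend a;
  a.PrintStats();  // one entry point now reports both sets of counters
}
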
@@ -700,18 +701,21 @@ static AsanAllocator &get_allocator() {
   return instance.allocator;
 }
 
-bool AsanChunkView::IsValid() {
+bool AsanChunkView::IsValid() const {
   return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
 }
-bool AsanChunkView::IsAllocated() {
+bool AsanChunkView::IsAllocated() const {
   return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
 }
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-AllocType AsanChunkView::GetAllocType() {
+bool AsanChunkView::IsQuarantined() const {
+  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+}
+uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
+uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() const {
   return (AllocType)chunk_->alloc_type;
 }
@@ -722,14 +726,14 @@ static StackTrace GetStackTraceFromId(u32 id) {
   return res;
 }
 
-u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
-u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
-StackTrace AsanChunkView::GetAllocStack() {
+u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
+StackTrace AsanChunkView::GetAllocStack() const {
   return GetStackTraceFromId(GetAllocStackId());
 }
-StackTrace AsanChunkView::GetFreeStack() {
+StackTrace AsanChunkView::GetFreeStack() const {
   return GetStackTraceFromId(GetFreeStackId());
 }


@@ -51,28 +51,29 @@ void GetAllocatorOptions(AllocatorOptions *options);
 class AsanChunkView {
  public:
   explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
-  bool IsValid();        // Checks if AsanChunkView points to a valid allocated
-                         // or quarantined chunk.
-  bool IsAllocated();    // Checks if the memory is currently allocated.
-  uptr Beg();            // First byte of user memory.
-  uptr End();            // Last byte of user memory.
-  uptr UsedSize();       // Size requested by the user.
-  uptr AllocTid();
-  uptr FreeTid();
+  bool IsValid() const;        // Checks if AsanChunkView points to a valid
+                               // allocated or quarantined chunk.
+  bool IsAllocated() const;    // Checks if the memory is currently allocated.
+  bool IsQuarantined() const;  // Checks if the memory is currently quarantined.
+  uptr Beg() const;            // First byte of user memory.
+  uptr End() const;            // Last byte of user memory.
+  uptr UsedSize() const;       // Size requested by the user.
+  uptr AllocTid() const;
+  uptr FreeTid() const;
   bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
-  u32 GetAllocStackId();
-  u32 GetFreeStackId();
-  StackTrace GetAllocStack();
-  StackTrace GetFreeStack();
-  AllocType GetAllocType();
-  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
+  u32 GetAllocStackId() const;
+  u32 GetFreeStackId() const;
+  StackTrace GetAllocStack() const;
+  StackTrace GetFreeStack() const;
+  AllocType GetAllocType() const;
+  bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
     if (addr >= Beg() && (addr + access_size) <= End()) {
       *offset = addr - Beg();
       return true;
     }
     return false;
   }
-  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
+  bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
     (void)access_size;
     if (addr < Beg()) {
       *offset = Beg() - addr;
@@ -80,7 +81,7 @@ class AsanChunkView {
     }
     return false;
   }
-  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
+  bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
     if (addr + access_size > End()) {
       *offset = addr - End();
       return true;
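
The const-qualification throughout AsanChunkView is what lets these accessors be called through a const reference: the new HeapProfile::ProcessChunk below takes a const AsanChunkView&, which the old non-const declarations would reject. A minimal illustration of that rule with a hypothetical stand-in class (not the real AsanChunkView):

#include <cstdio>

struct View {                              // hypothetical stand-in class
  int UsedSize() const { return size_; }   // const: callable via const View&
  int UsedSizeOld() { return size_; }      // non-const: const callers rejected
  int size_ = 42;
};

int Process(const View &v) {
  return v.UsedSize();        // compiles: const member on a const reference
  // return v.UsedSizeOld();  // error: discards const qualifier
}

int main() { std::printf("%d\n", Process(View{})); }
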


@@ -32,9 +32,56 @@ struct AllocationSite {
 class HeapProfile {
  public:
   HeapProfile() : allocations_(1024) {}
+
+  void ProcessChunk(const AsanChunkView& cv) {
+    if (cv.IsAllocated()) {
+      total_allocated_user_size_ += cv.UsedSize();
+      total_allocated_count_++;
+      u32 id = cv.GetAllocStackId();
+      if (id)
+        Insert(id, cv.UsedSize());
+    } else if (cv.IsQuarantined()) {
+      total_quarantined_user_size_ += cv.UsedSize();
+      total_quarantined_count_++;
+    } else {
+      total_other_count_++;
+    }
+  }
+
+  void Print(uptr top_percent) {
+    InternalSort(&allocations_, allocations_.size(),
+                 [](const AllocationSite &a, const AllocationSite &b) {
+                   return a.total_size > b.total_size;
+                 });
+    CHECK(total_allocated_user_size_);
+    uptr total_shown = 0;
+    Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
+           "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
+           "showing top %zd%%\n",
+           total_allocated_user_size_, total_allocated_count_,
+           total_quarantined_user_size_, total_quarantined_count_,
+           total_other_count_, total_allocated_count_ +
+           total_quarantined_count_ + total_other_count_, top_percent);
+    for (uptr i = 0; i < allocations_.size(); i++) {
+      auto &a = allocations_[i];
+      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
+             a.total_size * 100 / total_allocated_user_size_, a.count);
+      StackDepotGet(a.id).Print();
+      total_shown += a.total_size;
+      if (total_shown * 100 / total_allocated_user_size_ > top_percent)
+        break;
+    }
+  }
+
+ private:
+  uptr total_allocated_user_size_ = 0;
+  uptr total_allocated_count_ = 0;
+  uptr total_quarantined_user_size_ = 0;
+  uptr total_quarantined_count_ = 0;
+  uptr total_other_count_ = 0;
+  InternalMmapVector<AllocationSite> allocations_;
+
   void Insert(u32 id, uptr size) {
-    total_allocated_ += size;
-    total_count_++;
     // Linear lookup will be good enough for most cases (although not all).
     for (uptr i = 0; i < allocations_.size(); i++) {
       if (allocations_[i].id == id) {
@@ -45,40 +92,11 @@
     }
     allocations_.push_back({id, size, 1});
   }
-
-  void Print(uptr top_percent) {
-    InternalSort(&allocations_, allocations_.size(),
-                 [](const AllocationSite &a, const AllocationSite &b) {
-                   return a.total_size > b.total_size;
-                 });
-    CHECK(total_allocated_);
-    uptr total_shown = 0;
-    Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
-           "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
-    for (uptr i = 0; i < allocations_.size(); i++) {
-      auto &a = allocations_[i];
-      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
-             a.total_size * 100 / total_allocated_, a.count);
-      StackDepotGet(a.id).Print();
-      total_shown += a.total_size;
-      if (total_shown * 100 / total_allocated_ > top_percent)
-        break;
-    }
-  }
-
- private:
-  uptr total_allocated_ = 0;
-  uptr total_count_ = 0;
-  InternalMmapVector<AllocationSite> allocations_;
 };
 
 static void ChunkCallback(uptr chunk, void *arg) {
-  HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
-  AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
-  if (!cv.IsAllocated()) return;
-  u32 id = cv.GetAllocStackId();
-  if (!id) return;
-  hp->Insert(id, cv.UsedSize());
+  reinterpret_cast<HeapProfile*>(arg)->ProcessChunk(
+      FindHeapChunkByAllocBeg(chunk));
 }
 
 static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
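
The rewritten Print() above reports quarantined and "other" chunks in its header line but still computes the per-site percentages and the top-N% cutoff against live (allocated) user bytes only. The cutoff logic can be sketched in isolation with ordinary std::sort and printf standing in for the sanitizer-internal InternalSort and Printf (toy data and names, not the actual implementation):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Site { unsigned id; size_t total_size; size_t count; };

// Print the largest allocation sites until roughly top_percent of the live
// heap has been shown -- the same cutoff rule as HeapProfile::Print.
void PrintTop(std::vector<Site> sites, size_t total_allocated, size_t top_percent) {
  std::sort(sites.begin(), sites.end(),
            [](const Site &a, const Site &b) { return a.total_size > b.total_size; });
  size_t total_shown = 0;
  for (const Site &s : sites) {
    std::printf("%zu byte(s) (%zu%%) in %zu allocation(s) [site %u]\n",
                s.total_size, s.total_size * 100 / total_allocated, s.count, s.id);
    total_shown += s.total_size;
    if (total_shown * 100 / total_allocated > top_percent) break;
  }
}

int main() {
  // 1000 live bytes total; asking for the top 50% prints the 600-byte site only.
  PrintTop({{1, 600, 3}, {2, 300, 2}, {3, 100, 1}}, 1000, 50);
}
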


@@ -227,9 +227,8 @@ class SizeClassAllocator64 {
     uptr in_use = region->n_allocated - region->n_freed;
     uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
     Printf(
-        " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
-        "num_freed_chunks %zd"
-        " avail: %zd rss: %zdK releases: %zd\n",
+        " %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd\n",
         class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
         region->n_allocated, region->n_freed, in_use,
         region->num_freed_chunks, avail_chunks, rss >> 10,
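
The only change to the size-class report is wider minimum field widths (%zd becoming %6zd or %7zd), so the per-class rows line up as columns regardless of magnitude. A quick standalone illustration with standard printf (the sanitizer's own Printf accepts a similar width syntax, as the hunk above shows; %zu is used here for unsigned size_t values):

#include <cstdio>

int main() {
  // Without minimum widths, columns drift with the magnitude of each value.
  std::printf("%02zu (%zu): allocs: %zu\n", (size_t)3, (size_t)48, (size_t)7);
  std::printf("%02zu (%zu): allocs: %zu\n", (size_t)17, (size_t)131072, (size_t)123456);
  // With fixed minimum widths, the same rows align vertically.
  std::printf("%02zu (%6zu): allocs: %7zu\n", (size_t)3, (size_t)48, (size_t)7);
  std::printf("%02zu (%6zu): allocs: %7zu\n", (size_t)17, (size_t)131072, (size_t)123456);
}
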


@@ -73,6 +73,11 @@ class Quarantine {
     Recycle(cb);
   }
 
+  void PrintStats() const {
+    // It assumes that the world is stopped, just as the allocator's PrintStats.
+    cache_.PrintStats();
+  }
+
  private:
   // Read-only data.
   char pad0_[kCacheLineSize];
@@ -163,8 +168,25 @@ class QuarantineCache {
     return b;
   }
 
+  void PrintStats() const {
+    uptr batch_count = 0;
+    uptr total_quarantine_bytes = 0;
+    uptr total_quarantine_chunks = 0;
+    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
+      batch_count++;
+      total_quarantine_bytes += (*it).size;
+      total_quarantine_chunks += (*it).count;
+    }
+    Printf("Global quarantine stats: batches: %zd; bytes: %zd; chunks: %zd "
+           "(capacity: %zd chunks)\n",
+           batch_count, total_quarantine_bytes, total_quarantine_chunks,
+           batch_count * QuarantineBatch::kSize);
+  }
+
  private:
-  IntrusiveList<QuarantineBatch> list_;
+  typedef IntrusiveList<QuarantineBatch> List;
+  List list_;
   atomic_uintptr_t size_;
 
   void SizeAdd(uptr add) {
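
QuarantineCache::PrintStats walks the batch list with a const iterator, sums per-batch byte and chunk counters, and reports capacity as batch_count * QuarantineBatch::kSize, i.e. how many chunks the currently allocated batches could hold in total. A self-contained sketch of that aggregation over toy batches, using std::list in place of the sanitizer's IntrusiveList and an assumed per-batch capacity of 1021 chunks (the real constant is defined elsewhere in sanitizer_quarantine.h):

#include <cstdio>
#include <list>

// Toy stand-in for QuarantineBatch: each batch tracks the bytes and number of
// chunks it currently holds; kSize is the per-batch chunk capacity (assumed
// value, for illustration only).
struct Batch {
  static constexpr size_t kSize = 1021;
  size_t size;   // bytes quarantined in this batch
  size_t count;  // chunks quarantined in this batch
};

void PrintQuarantineStats(const std::list<Batch> &batches) {
  size_t batch_count = 0, total_bytes = 0, total_chunks = 0;
  for (auto it = batches.begin(); it != batches.end(); ++it) {
    batch_count++;
    total_bytes += it->size;
    total_chunks += it->count;
  }
  std::printf("Global quarantine stats: batches: %zu; bytes: %zu; chunks: %zu "
              "(capacity: %zu chunks)\n",
              batch_count, total_bytes, total_chunks, batch_count * Batch::kSize);
}

int main() {
  PrintQuarantineStats({{4096, 12}, {8192, 40}});
}
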