[sanitizer] make LargeMmapAllocator::GetBlockBegin faster by not using a linked list
llvm-svn: 171035
This commit is contained in:
parent
19969e5045
commit
fc7de2910c
|
|
@@ -714,11 +714,9 @@ class LargeMmapAllocator {
|
||||||
h->map_size = map_size;
|
h->map_size = map_size;
|
||||||
{
|
{
|
||||||
SpinMutexLock l(&mutex_);
|
SpinMutexLock l(&mutex_);
|
||||||
h->next = list_;
|
uptr idx = n_chunks_++;
|
||||||
h->prev = 0;
|
h->chunk_idx = idx;
|
||||||
if (list_)
|
chunks_[idx] = h;
|
||||||
list_->prev = h;
|
|
||||||
list_ = h;
|
|
||||||
}
|
}
|
||||||
return reinterpret_cast<void*>(res);
|
return reinterpret_cast<void*>(res);
|
||||||
}
|
}
|
||||||
|
|
@@ -727,14 +725,12 @@ class LargeMmapAllocator {
|
||||||
Header *h = GetHeader(p);
|
Header *h = GetHeader(p);
|
||||||
{
|
{
|
||||||
SpinMutexLock l(&mutex_);
|
SpinMutexLock l(&mutex_);
|
||||||
Header *prev = h->prev;
|
uptr idx = h->chunk_idx;
|
||||||
Header *next = h->next;
|
CHECK_EQ(chunks_[idx], h);
|
||||||
if (prev)
|
CHECK_LT(idx, n_chunks_);
|
||||||
prev->next = next;
|
chunks_[idx] = chunks_[n_chunks_ - 1];
|
||||||
if (next)
|
chunks_[idx]->chunk_idx = idx;
|
||||||
next->prev = prev;
|
n_chunks_--;
|
||||||
if (h == list_)
|
|
||||||
list_ = next;
|
|
||||||
}
|
}
|
||||||
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
|
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
|
||||||
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
|
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
|
||||||
|
|
@@ -743,8 +739,10 @@ class LargeMmapAllocator {
|
||||||
uptr TotalMemoryUsed() {
|
uptr TotalMemoryUsed() {
|
||||||
SpinMutexLock l(&mutex_);
|
SpinMutexLock l(&mutex_);
|
||||||
uptr res = 0;
|
uptr res = 0;
|
||||||
for (Header *l = list_; l; l = l->next) {
|
for (uptr i = 0; i < n_chunks_; i++) {
|
||||||
res += RoundUpMapSize(l->size);
|
Header *h = chunks_[i];
|
||||||
|
CHECK_EQ(h->chunk_idx, i);
|
||||||
|
res += RoundUpMapSize(h->size);
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
@@ -765,20 +763,32 @@ class LargeMmapAllocator {
|
||||||
void *GetBlockBegin(void *ptr) {
|
void *GetBlockBegin(void *ptr) {
|
||||||
uptr p = reinterpret_cast<uptr>(ptr);
|
uptr p = reinterpret_cast<uptr>(ptr);
|
||||||
SpinMutexLock l(&mutex_);
|
SpinMutexLock l(&mutex_);
|
||||||
for (Header *l = list_; l; l = l->next) {
|
uptr nearest_chunk = 0;
|
||||||
if (p >= l->map_beg && p < l->map_beg + l->map_size)
|
// Cache-friendly linear search.
|
||||||
return GetUser(l);
|
for (uptr i = 0; i < n_chunks_; i++) {
|
||||||
|
uptr ch = reinterpret_cast<uptr>(chunks_[i]);
|
||||||
|
if (p < ch) continue; // p is at left to this chunk, skip it.
|
||||||
|
if (p - ch < p - nearest_chunk)
|
||||||
|
nearest_chunk = ch;
|
||||||
}
|
}
|
||||||
|
if (!nearest_chunk)
|
||||||
return 0;
|
return 0;
|
||||||
|
Header *h = reinterpret_cast<Header *>(nearest_chunk);
|
||||||
|
CHECK_GE(nearest_chunk, h->map_beg);
|
||||||
|
CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
|
||||||
|
CHECK_LE(nearest_chunk, p);
|
||||||
|
if (h->map_beg + h->map_size < p)
|
||||||
|
return 0;
|
||||||
|
return GetUser(h);
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
|
||||||
struct Header {
|
struct Header {
|
||||||
uptr map_beg;
|
uptr map_beg;
|
||||||
uptr map_size;
|
uptr map_size;
|
||||||
uptr size;
|
uptr size;
|
||||||
Header *next;
|
uptr chunk_idx;
|
||||||
Header *prev;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
Header *GetHeader(uptr p) {
|
Header *GetHeader(uptr p) {
|
||||||
|
|
@@ -797,7 +807,8 @@ class LargeMmapAllocator {
|
||||||
}
|
}
|
||||||
|
|
||||||
uptr page_size_;
|
uptr page_size_;
|
||||||
Header *list_;
|
Header *chunks_[kMaxNumChunks];
|
||||||
|
uptr n_chunks_;
|
||||||
SpinMutex mutex_;
|
SpinMutex mutex_;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -244,12 +244,13 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
|
||||||
LargeMmapAllocator<> a;
|
LargeMmapAllocator<> a;
|
||||||
a.Init();
|
a.Init();
|
||||||
|
|
||||||
static const int kNumAllocs = 100;
|
static const int kNumAllocs = 1000;
|
||||||
char *allocated[kNumAllocs];
|
char *allocated[kNumAllocs];
|
||||||
static const uptr size = 1000;
|
static const uptr size = 4000;
|
||||||
// Allocate some.
|
// Allocate some.
|
||||||
for (int i = 0; i < kNumAllocs; i++) {
|
for (int i = 0; i < kNumAllocs; i++) {
|
||||||
allocated[i] = (char *)a.Allocate(size, 1);
|
allocated[i] = (char *)a.Allocate(size, 1);
|
||||||
|
CHECK(a.PointerIsMine(allocated[i]));
|
||||||
}
|
}
|
||||||
// Deallocate all.
|
// Deallocate all.
|
||||||
CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
|
CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
|
||||||
|
|
@@ -269,6 +270,11 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
|
||||||
*meta = i;
|
*meta = i;
|
||||||
allocated[i] = x;
|
allocated[i] = x;
|
||||||
}
|
}
|
||||||
|
for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
|
||||||
|
char *p = allocated[i % kNumAllocs];
|
||||||
|
CHECK(a.PointerIsMine(p));
|
||||||
|
CHECK(a.PointerIsMine(p + 2000));
|
||||||
|
}
|
||||||
CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
|
CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
|
||||||
// Deallocate all in reverse order.
|
// Deallocate all in reverse order.
|
||||||
for (int i = 0; i < kNumAllocs; i++) {
|
for (int i = 0; i < kNumAllocs; i++) {
|
||||||
|
|
@@ -280,9 +286,12 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
|
||||||
a.Deallocate(p);
|
a.Deallocate(p);
|
||||||
}
|
}
|
||||||
CHECK_EQ(a.TotalMemoryUsed(), 0);
|
CHECK_EQ(a.TotalMemoryUsed(), 0);
|
||||||
|
|
||||||
|
// Test alignments.
|
||||||
uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
|
uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
|
||||||
for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
|
for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
|
||||||
for (int i = 0; i < kNumAllocs; i++) {
|
const uptr kNumAlignedAllocs = 100;
|
||||||
|
for (int i = 0; i < kNumAlignedAllocs; i++) {
|
||||||
uptr size = ((i % 10) + 1) * 4096;
|
uptr size = ((i % 10) + 1) * 4096;
|
||||||
char *p = allocated[i] = (char *)a.Allocate(size, alignment);
|
char *p = allocated[i] = (char *)a.Allocate(size, alignment);
|
||||||
CHECK_EQ(p, a.GetBlockBegin(p));
|
CHECK_EQ(p, a.GetBlockBegin(p));
|
||||||
|
|
@@ -291,7 +300,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) {
|
||||||
CHECK_EQ(0, (uptr)allocated[i] % alignment);
|
CHECK_EQ(0, (uptr)allocated[i] % alignment);
|
||||||
p[0] = p[size - 1] = 0;
|
p[0] = p[size - 1] = 0;
|
||||||
}
|
}
|
||||||
for (int i = 0; i < kNumAllocs; i++) {
|
for (int i = 0; i < kNumAlignedAllocs; i++) {
|
||||||
a.Deallocate(allocated[i]);
|
a.Deallocate(allocated[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@@ -381,7 +390,7 @@ void TestSizeClassAllocatorLocalCache() {
|
||||||
const int kNumIter = 100;
|
const int kNumIter = 100;
|
||||||
uptr saved_total = 0;
|
uptr saved_total = 0;
|
||||||
for (int class_id = 1; class_id <= 5; class_id++) {
|
for (int class_id = 1; class_id <= 5; class_id++) {
|
||||||
for (int i = 0; i < kNumIter; i++) {
|
for (int it = 0; it < kNumIter; it++) {
|
||||||
void *allocated[kNumAllocs];
|
void *allocated[kNumAllocs];
|
||||||
for (uptr i = 0; i < kNumAllocs; i++) {
|
for (uptr i = 0; i < kNumAllocs; i++) {
|
||||||
allocated[i] = cache.Allocate(a, class_id);
|
allocated[i] = cache.Allocate(a, class_id);
|
||||||
|
|
@@ -391,7 +400,7 @@ void TestSizeClassAllocatorLocalCache() {
|
||||||
}
|
}
|
||||||
cache.Drain(a);
|
cache.Drain(a);
|
||||||
uptr total_allocated = a->TotalMemoryUsed();
|
uptr total_allocated = a->TotalMemoryUsed();
|
||||||
if (i)
|
if (it)
|
||||||
CHECK_EQ(saved_total, total_allocated);
|
CHECK_EQ(saved_total, total_allocated);
|
||||||
saved_total = total_allocated;
|
saved_total = total_allocated;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue