tsan: include MBlock/SyncObj stats into mem profile
Include info about MBlock/SyncObj memory consumption in the memory profile.

Depends on D110148.

Reviewed By: melver, vitalybuka

Differential Revision: https://reviews.llvm.org/D110149
commit eefef56ece
parent 608ffc98c3
@@ -49,11 +49,7 @@ class DenseSlabAlloc {
   static_assert(sizeof(T) > sizeof(IndexT),
                 "it doesn't make sense to use dense alloc");
 
-  explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
-    freelist_ = 0;
-    fillpos_ = 0;
-    name_ = name;
-  }
+  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}
 
   explicit DenseSlabAlloc(const char *name)
       : DenseSlabAlloc(LINKER_INITIALIZED, name) {
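The constructor refactor above works because the data members now carry default member initializers (added two hunks below), so the LinkerInitialized constructor only needs to bind name_. A minimal standalone illustration of the same pattern, with purely hypothetical names:

    // Hypothetical class: default member initializers replace the
    // assignments that used to live in the constructor body.
    struct Example {
      explicit Example(const char *name) : name_(name) {}
      int freelist_ = 0;        // initialized without constructor code
      int fillpos_ = 0;
      const char *const name_;  // can now be const-qualified
    };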
@@ -89,6 +85,8 @@ class DenseSlabAlloc {
   }
 
   void FlushCache(Cache *c) {
+    if (!c->pos)
+      return;
     SpinMutexLock lock(&mtx_);
     while (c->pos) {
       IndexT idx = c->cache[--c->pos];
@@ -102,33 +100,39 @@ class DenseSlabAlloc {
     internal_memset(c->cache, 0, sizeof(c->cache));
   }
 
+  uptr AllocatedMemory() const {
+    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
+  }
+
 private:
   T *map_[kL1Size];
   SpinMutex mtx_;
-  IndexT freelist_;
-  uptr fillpos_;
-  const char *name_;
+  IndexT freelist_ = {0};
+  atomic_uintptr_t fillpos_ = {0};
+  const char *const name_;
 
   void Refill(Cache *c) {
     SpinMutexLock lock(&mtx_);
     if (freelist_ == 0) {
-      if (fillpos_ == kL1Size) {
+      uptr fillpos = atomic_load_relaxed(&fillpos_);
+      if (fillpos == kL1Size) {
         Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
             name_, kL1Size, kL2Size);
         Die();
       }
-      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
-          name_, fillpos_, kL1Size, kL2Size);
+      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+              fillpos, kL1Size, kL2Size);
       T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
       // Reserve 0 as invalid index.
-      IndexT start = fillpos_ == 0 ? 1 : 0;
+      IndexT start = fillpos == 0 ? 1 : 0;
       for (IndexT i = start; i < kL2Size; i++) {
         new(batch + i) T;
-        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
       }
       *(IndexT*)(batch + kL2Size - 1) = 0;
-      freelist_ = fillpos_ * kL2Size + start;
-      map_[fillpos_++] = batch;
+      freelist_ = fillpos * kL2Size + start;
+      map_[fillpos] = batch;
+      atomic_store_relaxed(&fillpos_, fillpos + 1);
     }
     for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
       IndexT idx = freelist_;
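In the hunk above, fillpos_ becomes an atomic so the new AllocatedMemory() accessor can read it without taking mtx_, while Refill() still performs every mutation under the lock and republishes the count with a relaxed store. A self-contained sketch of that pattern using std::atomic in place of the sanitizer runtime's atomic_uintptr_t wrappers (all names hypothetical):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    // Grow-only slab counter: writers mutate under a mutex; readers may
    // sample the published count lock-free. Relaxed ordering is enough
    // because readers only need the count for statistics, not a
    // synchronized view of the slab contents it describes.
    class SlabStats {
     public:
      void Grow() {
        std::lock_guard<std::mutex> guard(mtx_);
        size_t n = slabs_.load(std::memory_order_relaxed);
        // ... allocate and link slab number n here ...
        slabs_.store(n + 1, std::memory_order_relaxed);  // publish
      }
      // Lock-free: approximate bytes consumed by slabs so far.
      size_t AllocatedMemory(size_t bytes_per_slab) const {
        return slabs_.load(std::memory_order_relaxed) * bytes_per_slab;
      }
     private:
      std::mutex mtx_;
      std::atomic<size_t> slabs_{0};
    };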
@@ -121,17 +121,24 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
 
 void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
   uptr mem[MemCount];
-  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
+  internal_memset(mem, 0, sizeof(mem));
   __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+  auto meta = ctx->metamap.GetMemoryStats();
   StackDepotStats *stacks = StackDepotGetStats();
+  // All these are allocated from the common mmap region.
+  mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks->allocated;
+  if (s64(mem[MemMmap]) < 0)
+    mem[MemMmap] = 0;
   internal_snprintf(buf, buf_size,
       "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
-      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
-      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
-      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
-      mem[MemHeap] >> 20, mem[MemOther] >> 20,
-      stacks->allocated >> 20, stacks->n_uniq_ids,
-      nlive, nthread);
+      " trace:%zd heap:%zd other:%zd memblocks:%zd syncobj:%zu"
+      " stacks=%zd[%zd] nthr=%zd/%zd\n",
+      mem[MemTotal] >> 20, mem[MemShadow] >> 20,
+      mem[MemMeta] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20,
+      mem[MemTrace] >> 20, mem[MemHeap] >> 20,
+      mem[MemOther] >> 20, meta.mem_block >> 20,
+      meta.sync_obj >> 20, stacks->allocated >> 20,
+      stacks->n_uniq_ids, nlive, nthread);
 }
 
 #if SANITIZER_LINUX
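Given the reworked format string, a profile line produced by WriteMemoryProfile would render roughly like this (all numbers invented purely for illustration):

    RSS 3047 MB: shadow:2048 meta:256 file:16 mmap:128 trace:512 heap:64 other:23 memblocks:96 syncobj:32 stacks=12[48231] nthr=42/64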
@@ -269,4 +269,11 @@ void MetaMap::OnProcIdle(Processor *proc) {
   sync_alloc_.FlushCache(&proc->sync_cache);
 }
 
+MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
+  MemoryStats stats;
+  stats.mem_block = block_alloc_.AllocatedMemory();
+  stats.sync_obj = sync_alloc_.AllocatedMemory();
+  return stats;
+}
+
 } // namespace __tsan
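GetMemoryStats can be const and cheap because it only snapshots the two DenseSlabAlloc counters via the lock-free AllocatedMemory(). A caller-side sketch; the struct fields come from the diff, while the surrounding statements are illustrative only:

    // Snapshot both DenseSlabAlloc instances; cheap because
    // AllocatedMemory() takes no lock.
    MetaMap::MemoryStats stats = ctx->metamap.GetMemoryStats();
    uptr meta_mb = (stats.mem_block + stats.sync_obj) >> 20;
    VPrintf(1, "metamap: blocks=%zuMB sync=%zuMB total=%zuMB\n",
            stats.mem_block >> 20, stats.sync_obj >> 20, meta_mb);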
@@ -127,6 +127,13 @@ class MetaMap {
 
   void OnProcIdle(Processor *proc);
 
+  struct MemoryStats {
+    uptr mem_block;
+    uptr sync_obj;
+  };
+
+  MemoryStats GetMemoryStats() const;
+
 private:
   static const u32 kFlagMask = 3u << 30;
   static const u32 kFlagBlock = 1u << 30;