[sanitizer] Reverting D34152
Summary:
This broke thread_local_quarantine_pthread_join.cc on some architectures,
due to the overhead of the stashed regions. Reverting while figuring out
the best way to deal with it.

Reviewers: alekseyshl

Reviewed By: alekseyshl

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D34213

llvm-svn: 305404
parent 465c2c2621
commit 77f30c9c31
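The overhead the summary refers to comes from the padding-region stash that the hunks below remove: when the aligned mapping happened to be aligned on the first try, the pre-revert code kept the trailing kRegionSize of padding mapped in a small stash for reuse, so a single region allocation could hold 2 * kRegionSize of mapped (and stats-counted) memory. Below is a minimal, self-contained sketch of that scheme, assuming plain POSIX mmap/munmap and an illustrative 1 MiB kRegionSize; it is not the sanitizer source, it only shows where the extra mapped memory comes from.

// Illustrative sketch only: plain POSIX mmap/munmap, made-up constants,
// not the sanitizer sources.
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

static const uintptr_t kRegionSize = 1 << 20;  // assumed 1 MiB regions
static const int kMaxStashedRegions = 8;
static uintptr_t regions_stash[kMaxStashedRegions];
static int num_stashed_regions = 0;

static uintptr_t AllocateRegionWithStash() {
  // Fast path of the reverted scheme: reuse a previously stashed padding
  // region instead of calling mmap again.
  if (num_stashed_regions > 0)
    return regions_stash[--num_stashed_regions];

  // Map size + alignment (here both kRegionSize) so an aligned region can
  // always be carved out of the mapping.
  void *p = mmap(nullptr, 2 * kRegionSize, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 0;
  uintptr_t map_res = (uintptr_t)p;
  uintptr_t map_end = map_res + 2 * kRegionSize;
  uintptr_t region = (map_res + kRegionSize - 1) & ~(kRegionSize - 1);
  uintptr_t padding = region + kRegionSize;

  if (region != map_res)  // Trim the unaligned head.
    munmap((void *)map_res, region - map_res);

  if (region == map_res && num_stashed_regions < kMaxStashedRegions) {
    // Lucky, already-aligned case: the whole tail is a spare aligned region.
    // Stashing it (pre-revert behaviour) keeps 2 * kRegionSize mapped for a
    // single allocation; that is the overhead this commit removes by simply
    // unmapping the tail again (post-revert behaviour).
    regions_stash[num_stashed_regions++] = padding;
  } else if (padding != map_end) {
    munmap((void *)padding, map_end - padding);
  }
  return region;
}

int main() {
  uintptr_t r1 = AllocateRegionWithStash();
  uintptr_t r2 = AllocateRegionWithStash();
  printf("r1=%#zx r2=%#zx regions still stashed=%d\n",
         (size_t)r1, (size_t)r2, num_stashed_regions);
  return 0;
}

The revert simply unmaps the padding again, accepting a possible extra mmap on a later allocation in exchange for not keeping the spare region mapped.
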
@@ -24,7 +24,7 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
 // be returned by MmapOrDie().
 //
 // Region:
-//   a result of an allocation of kRegionSize bytes aligned on kRegionSize.
+//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.

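The restored comment relies on every region being kRegionSize-aligned, so a pointer's region index is effectively its address divided by kRegionSize (compare the "mem & ~(kRegionSize - 1)" line in a later hunk), and a byte map with one entry per possible region records each region's size class. A tiny standalone illustration of that arithmetic follows; the 32-bit address-space size and 1 MiB region size are assumed example values, not taken from the sources.

// Illustrative arithmetic only; sizes are assumed example values.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kSpaceSize = 1ULL << 32;   // assume a 32-bit address space
  const uint64_t kRegionSize = 1ULL << 20;  // assume 1 MiB regions
  const uint64_t kNumPossibleRegions = kSpaceSize / kRegionSize;  // 4096
  const uint64_t p = 0x12345678;            // some pointer value
  const uint64_t region_id = p / kRegionSize;          // byte-map index
  const uint64_t region_beg = p & ~(kRegionSize - 1);  // cf. ComputeRegionBeg
  printf("possible regions: %llu\n", (unsigned long long)kNumPossibleRegions);
  printf("p=%#llx lies in region %llu, which begins at %#llx\n",
         (unsigned long long)p, (unsigned long long)region_id,
         (unsigned long long)region_beg);
  return 0;
}
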
@@ -106,7 +106,6 @@ class SizeClassAllocator32 {
   void Init(s32 release_to_os_interval_ms) {
     possible_regions.TestOnlyInit();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
-    num_stashed_regions = 0;
   }

   s32 ReleaseToOSIntervalMs() const {

@@ -276,49 +275,15 @@ class SizeClassAllocator32 {
     return mem & ~(kRegionSize - 1);
   }

-  // Allocates a region of kRegionSize bytes, aligned on kRegionSize. If we get
-  // more than one region back (in the event the allocation is aligned on the
-  // first try), attempt to store the second region into a stash. If the stash
-  // is full, just unmap the superfluous memory.
-  uptr AllocateRegionSlow(AllocatorStats *stat) {
-    uptr map_size = kRegionSize;
-    uptr padding_chunk;
-    uptr region = reinterpret_cast<uptr>(
-        MmapAlignedOrDie(kRegionSize, kRegionSize, "SizeClassAllocator32",
-                         &padding_chunk));
-    if (padding_chunk) {
-      // We have an extra region, attempt to stash it.
-      CHECK_EQ(padding_chunk, region + kRegionSize);
-      bool trim_extra = true;
-      {
-        SpinMutexLock l(&regions_stash_mutex);
-        if (num_stashed_regions < kMaxStashedRegions) {
-          regions_stash[num_stashed_regions++] = padding_chunk;
-          map_size = 2 * kRegionSize;
-          trim_extra = false;
-        }
-      }
-      if (trim_extra)
-        UnmapOrDie((void*)padding_chunk, kRegionSize);
-    }
-    MapUnmapCallback().OnMap(region, map_size);
-    stat->Add(AllocatorStatMapped, map_size);
-    return region;
-  }
-
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr region = 0;
-    {
-      SpinMutexLock l(&regions_stash_mutex);
-      if (num_stashed_regions > 0)
-        region = regions_stash[--num_stashed_regions];
-    }
-    if (!region)
-      region = AllocateRegionSlow(stat);
-    CHECK(IsAligned(region, kRegionSize));
-    possible_regions.set(ComputeRegionId(region), static_cast<u8>(class_id));
-    return region;
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+                                                       "SizeClassAllocator32"));
+    MapUnmapCallback().OnMap(res, kRegionSize);
+    stat->Add(AllocatorStatMapped, kRegionSize);
+    CHECK_EQ(0U, (res & (kRegionSize - 1)));
+    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+    return res;
   }

   SizeClassInfo *GetSizeClassInfo(uptr class_id) {

@@ -351,13 +316,6 @@ class SizeClassAllocator32 {
     }
   }

-  // Unless several threads request regions simultaneously from different size
-  // classes, the stash rarely contains more than 1 entry.
-  static const uptr kMaxStashedRegions = 8;
-  SpinMutex regions_stash_mutex;
-  uptr num_stashed_regions;
-  uptr regions_stash[kMaxStashedRegions];
-
   ByteMap possible_regions;
   SizeClassInfo size_class_info_array[kNumClasses];
 };

@@ -92,15 +92,7 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size);
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
-// Since the predominant use case of this function is "size == alignment" and
-// the nature of the way the alignment requirement is satisfied (by allocating
-// size+alignment bytes of memory), there's a potential of address space
-// fragmentation. The padding_chunk parameter provides the opportunity to
-// return the contiguous padding of "size" bytes of the allocated chunk if the
-// initial allocation happened to be perfectly aligned and the platform supports
-// partial unmapping of the mapped region.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
-                       uptr *padding_chunk);
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
 // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
 // unaccessible memory.
 bool MprotectNoAccess(uptr addr, uptr size);

@@ -146,29 +146,22 @@ void UnmapOrDie(void *addr, uptr size) {
 }

 // We want to map a chunk of address space aligned to 'alignment'.
-// We do it by mapping a bit more and then unmapping redundant pieces.
+// We do it by maping a bit more and then unmaping redundant pieces.
 // We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
-                       uptr* padding_chunk) {
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
   uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
   uptr map_end = map_res + map_size;
-  bool is_aligned = IsAligned(map_res, alignment);
-  if (is_aligned && padding_chunk && size == alignment) {
-    *padding_chunk = map_res + size;
-    return (void *)map_res;
-  }
-  if (padding_chunk)
-    *padding_chunk = 0;
   uptr res = map_res;
-  if (!is_aligned) {
-    res = (map_res + alignment - 1) & ~(alignment - 1);
-    UnmapOrDie((void*)map_res, res - map_res);
-  }
+  if (res & (alignment - 1))  // Not aligned.
+    res = (map_res + alignment) & ~(alignment - 1);
   uptr end = res + size;
-  UnmapOrDie((void*)end, map_end - end);
+  if (res != map_res)
+    UnmapOrDie((void*)map_res, res - map_res);
+  if (end != map_end)
+    UnmapOrDie((void*)end, map_end - end);
   return (void*)res;
 }

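One subtle line in the hunk above: the revert swaps "res = (map_res + alignment - 1) & ~(alignment - 1)" back to "res = (map_res + alignment) & ~(alignment - 1)". Both round up to an alignment boundary and both run only after a "not aligned" check, so they agree on every input that can actually reach them. The short standalone check below (illustrative addresses only, not sanitizer code) shows the two expressions differ only for an already-aligned address.

// Standalone check, illustrative addresses only.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t alignment = 0x1000;
  const uintptr_t samples[] = {0x7000 /* aligned */, 0x7123 /* unaligned */};
  for (uintptr_t map_res : samples) {
    uintptr_t pre  = (map_res + alignment - 1) & ~(alignment - 1);  // pre-revert
    uintptr_t post = (map_res + alignment) & ~(alignment - 1);      // post-revert
    printf("map_res=%#zx  pre=%#zx  post=%#zx\n",
           (size_t)map_res, (size_t)pre, (size_t)post);
  }
  return 0;
}
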
@@ -132,14 +132,10 @@ void UnmapOrDie(void *addr, uptr size) {
 }

 // We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
-                       uptr *padding_chunk) {
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));

-  if (padding_chunk)
-    *padding_chunk = 0;
-
   // Windows will align our allocations to at least 64K.
   alignment = Max(alignment, GetMmapGranularity());

@@ -77,8 +77,8 @@ TEST(SanitizerCommon, MmapAlignedOrDie) {
   for (uptr size = 1; size <= 32; size *= 2) {
     for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
       for (int iter = 0; iter < 100; iter++) {
-        uptr res = (uptr)MmapAlignedOrDie(size * PageSize, alignment * PageSize,
-                                          "MmapAlignedOrDieTest", nullptr);
+        uptr res = (uptr)MmapAlignedOrDie(
+            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
         EXPECT_EQ(0U, res % (alignment * PageSize));
         internal_memset((void*)res, 1, size * PageSize);
         UnmapOrDie((void*)res, size * PageSize);

@@ -87,37 +87,6 @@ TEST(SanitizerCommon, MmapAlignedOrDie) {
   }
 }

-TEST(SanitizerCommon, MmapAlignedOrDiePaddingChunk) {
-  uptr PageSize = GetPageSizeCached();
-  for (uptr size = 1; size <= 32; size *= 2) {
-    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
-      for (int iter = 0; iter < 100; iter++) {
-        uptr padding_chunk;
-        uptr res = (uptr)MmapAlignedOrDie(size * PageSize, alignment * PageSize,
-            "MmapAlignedOrDiePaddingChunkTest", &padding_chunk);
-        EXPECT_EQ(0U, res % (alignment * PageSize));
-        internal_memset((void*)res, 1, size * PageSize);
-        UnmapOrDie((void*)res, size * PageSize);
-        if (SANITIZER_WINDOWS || (size != alignment)) {
-          // Not supported on Windows or for different size and alignment.
-          EXPECT_EQ(0U, padding_chunk);
-          continue;
-        }
-        if (size == 1 && alignment == 1) {
-          // mmap returns PageSize aligned chunks, so this is a specific case
-          // where we can check that padding_chunk will never be 0.
-          EXPECT_NE(0U, padding_chunk);
-        }
-        if (padding_chunk) {
-          EXPECT_EQ(res + size * PageSize, padding_chunk);
-          internal_memset((void*)padding_chunk, 1, alignment * PageSize);
-          UnmapOrDie((void*)padding_chunk, alignment * PageSize);
-        }
-      }
-    }
-  }
-}
-
 #if SANITIZER_LINUX
 TEST(SanitizerCommon, SanitizerSetThreadName) {
   const char *names[] = {