[sanitizer] Minor 32-bit primary improvements

Summary:
For the 32-bit TransferBatch:
- `SetFromArray` callers already bound `count`, so relax the `CHECK` to a
  `DCHECK` (see the first sketch below);
- same for `Add`;
- mark `CopyToArray` as `const`;
For the 32-bit Primary:
- `{Dea,A}llocateBatch` are only called from places that already check
  `class_id`, so relax the `CHECK` to a `DCHECK`;
- same for `AllocateRegion`;
- remove `GetRegionBeginBySizeClass`, which is unused;
- use a local variable for the random shuffle state so that the compiler can
  keep it in a register instead of reading and writing to the `SizeClassInfo`
  on every iteration (see the second sketch below);
For the 32-bit local cache:
- pass the count to drain instead of computing a `Min` every time, which is
  at times superfluous (see the third sketch below).
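
Note on the `CHECK` -> `DCHECK` relaxations: `CHECK*` is always compiled in,
while `DCHECK*` only expands to a real check in debug builds (gated on a flag
such as `SANITIZER_DEBUG`), so dropping redundant checks removes a branch from
release hot paths. A minimal self-contained sketch of that pattern; the `MY_*`
macros, the `MY_DEBUG` switch and the constants are illustrative stand-ins,
not the actual sanitizer_common macros:

  #include <cstdio>
  #include <cstdlib>

  // Illustrative stand-ins: MY_CHECK_LT is always compiled in, MY_DCHECK_LT
  // only expands to a real check when MY_DEBUG is defined (the sanitizer
  // macros use their own debug switch).
  #define MY_CHECK_LT(a, b)                                       \
    do {                                                          \
      if (!((a) < (b))) {                                         \
        std::fprintf(stderr, "check failed: %s\n", #a " < " #b);  \
        std::abort();                                             \
      }                                                           \
    } while (0)
  #if defined(MY_DEBUG)
  # define MY_DCHECK_LT(a, b) MY_CHECK_LT(a, b)
  #else
  # define MY_DCHECK_LT(a, b) do { (void)(a); (void)(b); } while (0)
  #endif

  static const unsigned kNumClasses = 64;  // arbitrary value for the sketch

  // AllocateBatch-style entry point: all callers validate class_id already,
  // so the bounds check only exists in debug builds.
  unsigned BytesForClass(unsigned class_id) {
    MY_DCHECK_LT(class_id, kNumClasses);
    return (class_id + 1) * 16;
  }

  int main() {
    std::printf("%u\n", BytesForClass(7));
    return 0;
  }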
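
The shuffle change is shown in the first hunk below; here is a standalone,
compilable version of the same pattern, assuming a made-up `XorShiftRand`
helper in place of sanitizer_common's `RandN`/`Swap`:

  #include <cstdint>
  #include <utility>

  // Made-up xorshift helper standing in for sanitizer_common's RandN();
  // the state must be seeded with a non-zero value.
  inline uint32_t XorShiftRand(uint32_t *state, uint32_t n) {
    uint32_t x = *state;
    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    *state = x;
    return x % n;  // [0, n)
  }

  // Keeping the state in a local for the whole loop lets the compiler hold it
  // in a register; passing rand_state straight through forces a load and a
  // store of *rand_state on every iteration.
  template <typename T>
  void RandomShuffleSketch(T *a, uint32_t n, uint32_t *rand_state) {
    if (n <= 1) return;
    uint32_t state = *rand_state;  // hoist the state into a local
    for (uint32_t i = n - 1; i > 0; i--)
      std::swap(a[i], a[XorShiftRand(&state, i + 1)]);
    *rand_state = state;           // write it back once at the end
  }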
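
The local cache change is not visible in the hunks below; a rough sketch of
the intent, with hypothetical type and field names rather than the actual
`SizeClassAllocator32LocalCache` code:

  #include <cstdint>

  // Hypothetical, simplified per-class cache; names are illustrative only.
  struct PerClassCache {
    uint32_t count;      // chunks currently cached
    uint32_t max_count;  // capacity for this size class (<= 256 here)
    void *chunks[256];
  };

  // Before, the drain path clamped internally on every call, e.g.
  //   uint32_t n = Min(max_count / 2, count);
  // even when the caller had already decided exactly how much to move.
  // After, the caller passes the count, so the Min only happens where needed.
  void Drain(PerClassCache *c, uint32_t count) {
    // Return `count` chunks to the shared per-class free list (elided here).
    c->count -= count;
  }

  void DeallocateSketch(PerClassCache *c, void *p) {
    if (c->count == c->max_count)
      Drain(c, c->max_count / 2);  // the caller picks the drain amount
    c->chunks[c->count++] = p;
  }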

Reviewers: alekseyshl

Reviewed By: alekseyshl

Subscribers: kubamracek, delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D46657

llvm-svn: 332478
Kostya Kortchinsky 2018-05-16 15:13:26 +00:00
parent 4ac68a210c
commit 561228b2fa
2 changed files with 13 additions and 16 deletions

@@ -72,8 +72,10 @@ INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
 template<typename T>
 INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
   if (n <= 1) return;
+  u32 state = *rand_state;
   for (u32 i = n - 1; i > 0; i--)
-    Swap(a[i], a[RandN(rand_state, i + 1)]);
+    Swap(a[i], a[RandN(&state, i + 1)]);
+  *rand_state = state;
 }

 #include "sanitizer_allocator_size_class_map.h"

@@ -64,8 +64,8 @@ class SizeClassAllocator32 {
   struct TransferBatch {
     static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
     void SetFromArray(void *batch[], uptr count) {
+      DCHECK_LE(count, kMaxNumCached);
       count_ = count;
-      CHECK_LE(count_, kMaxNumCached);
       for (uptr i = 0; i < count; i++)
         batch_[i] = batch[i];
     }
@@ -73,9 +73,9 @@ class SizeClassAllocator32 {
     void Clear() { count_ = 0; }
     void Add(void *ptr) {
       batch_[count_++] = ptr;
-      CHECK_LE(count_, kMaxNumCached);
+      DCHECK_LE(count_, kMaxNumCached);
     }
-    void CopyToArray(void *to_batch[]) {
+    void CopyToArray(void *to_batch[]) const {
       for (uptr i = 0, n = Count(); i < n; i++)
         to_batch[i] = batch_[i];
     }
@@ -153,7 +153,7 @@ class SizeClassAllocator32 {

   NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                         uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
+    DCHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
     if (sci->free_list.empty()) {
@@ -168,15 +168,13 @@ class SizeClassAllocator32 {

   NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                 TransferBatch *b) {
-    CHECK_LT(class_id, kNumClasses);
+    DCHECK_LT(class_id, kNumClasses);
     CHECK_GT(b->Count(), 0);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
     sci->free_list.push_front(b);
   }

-  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
-
   bool PointerIsMine(const void *p) {
     uptr mem = reinterpret_cast<uptr>(p);
     if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
@@ -252,12 +250,9 @@ class SizeClassAllocator32 {
     }
   }

-  void PrintStats() {
-  }
+  void PrintStats() {}

-  static uptr AdditionalSize() {
-    return 0;
-  }
+  static uptr AdditionalSize() { return 0; }

   typedef SizeClassMap SizeClassMapT;
   static const uptr kNumClasses = SizeClassMap::kNumClasses;
@@ -267,7 +262,7 @@ class SizeClassAllocator32 {
   static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

   struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
-    SpinMutex mutex;
+    StaticSpinMutex mutex;
     IntrusiveList<TransferBatch> free_list;
     u32 rand_state;
   };
@@ -284,8 +279,8 @@ class SizeClassAllocator32 {
   }

   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
-    CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+    DCHECK_LT(class_id, kNumClasses);
+    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
         kRegionSize, kRegionSize, PrimaryAllocatorName));
     if (UNLIKELY(!res))
       return 0;