[ADT][NFC] Use empty base optimisation in BumpPtrAllocatorImpl
Most uses of this class just use the default MallocAllocator. As this contains no fields, we can use the empty base optimisation for BumpPtrAllocatorImpl and save 8 bytes of padding for most use cases. This prevents using a class that is marked as `final` as the `AllocatorT` template argument. If one must use an allocator that has been marked as `final`, the simplest way around this is a proxy class. The proxy class should provide all the methods that `AllocatorBase` expects and should forward the calls to your own allocator instance. Reviewed By: dblaikie Differential Revision: https://reviews.llvm.org/D94439
This commit is contained in:
parent
55f2eeebc9
commit
a7130d85e4
|
|
@ -66,7 +66,8 @@ template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
|
|||
size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
|
||||
class BumpPtrAllocatorImpl
|
||||
: public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
|
||||
SizeThreshold, GrowthDelay>> {
|
||||
SizeThreshold, GrowthDelay>>,
|
||||
private AllocatorT {
|
||||
public:
|
||||
static_assert(SizeThreshold <= SlabSize,
|
||||
"The SizeThreshold must be at most the SlabSize to ensure "
|
||||
|
|
@ -80,15 +81,15 @@ public:
|
|||
|
||||
template <typename T>
|
||||
BumpPtrAllocatorImpl(T &&Allocator)
|
||||
: Allocator(std::forward<T &&>(Allocator)) {}
|
||||
: AllocatorT(std::forward<T &&>(Allocator)) {}
|
||||
|
||||
// Manually implement a move constructor as we must clear the old allocator's
|
||||
// slabs as a matter of correctness.
|
||||
BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
|
||||
: CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
|
||||
: AllocatorT(static_cast<AllocatorT &&>(Old)), CurPtr(Old.CurPtr),
|
||||
End(Old.End), Slabs(std::move(Old.Slabs)),
|
||||
CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
|
||||
BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize),
|
||||
Allocator(std::move(Old.Allocator)) {
|
||||
BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
|
||||
Old.CurPtr = Old.End = nullptr;
|
||||
Old.BytesAllocated = 0;
|
||||
Old.Slabs.clear();
|
||||
|
|
@ -110,7 +111,7 @@ public:
|
|||
RedZoneSize = RHS.RedZoneSize;
|
||||
Slabs = std::move(RHS.Slabs);
|
||||
CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
|
||||
Allocator = std::move(RHS.Allocator);
|
||||
AllocatorT::operator=(static_cast<AllocatorT &&>(RHS));
|
||||
|
||||
RHS.CurPtr = RHS.End = nullptr;
|
||||
RHS.BytesAllocated = 0;
|
||||
|
|
@ -170,7 +171,8 @@ public:
|
|||
// If Size is really big, allocate a separate slab for it.
|
||||
size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
|
||||
if (PaddedSize > SizeThreshold) {
|
||||
void *NewSlab = Allocator.Allocate(PaddedSize, alignof(std::max_align_t));
|
||||
void *NewSlab =
|
||||
AllocatorT::Allocate(PaddedSize, alignof(std::max_align_t));
|
||||
// We own the new slab and don't want anyone reading anyting other than
|
||||
// pieces returned from this method. So poison the whole slab.
|
||||
__asan_poison_memory_region(NewSlab, PaddedSize);
|
||||
|
|
@ -315,9 +317,6 @@ private:
|
|||
/// a sanitizer.
|
||||
size_t RedZoneSize = 1;
|
||||
|
||||
/// The allocator instance we use to get slabs of memory.
|
||||
AllocatorT Allocator;
|
||||
|
||||
static size_t computeSlabSize(unsigned SlabIdx) {
|
||||
// Scale the actual allocated slab size based on the number of slabs
|
||||
// allocated. Every GrowthDelay slabs allocated, we double
|
||||
|
|
@ -333,7 +332,7 @@ private:
|
|||
size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
|
||||
|
||||
void *NewSlab =
|
||||
Allocator.Allocate(AllocatedSlabSize, alignof(std::max_align_t));
|
||||
AllocatorT::Allocate(AllocatedSlabSize, alignof(std::max_align_t));
|
||||
// We own the new slab and don't want anyone reading anything other than
|
||||
// pieces returned from this method. So poison the whole slab.
|
||||
__asan_poison_memory_region(NewSlab, AllocatedSlabSize);
|
||||
|
|
@ -349,7 +348,7 @@ private:
|
|||
for (; I != E; ++I) {
|
||||
size_t AllocatedSlabSize =
|
||||
computeSlabSize(std::distance(Slabs.begin(), I));
|
||||
Allocator.Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
|
||||
AllocatorT::Deallocate(*I, AllocatedSlabSize, alignof(std::max_align_t));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -358,7 +357,7 @@ private:
|
|||
for (auto &PtrAndSize : CustomSizedSlabs) {
|
||||
void *Ptr = PtrAndSize.first;
|
||||
size_t Size = PtrAndSize.second;
|
||||
Allocator.Deallocate(Ptr, Size, alignof(std::max_align_t));
|
||||
AllocatorT::Deallocate(Ptr, Size, alignof(std::max_align_t));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Reference in New Issue