[sanitizer] fix the build on ancient gcc which has stricter rules about what can be put on TLS. Long term, we absolutely must build the run-times with the fresh target clang

llvm-svn: 169593
This commit is contained in:
Kostya Serebryany 2012-12-07 09:40:17 +00:00
parent 5e700abc1c
commit 378f93e804
1 changed file with 25 additions and 21 deletions

View File

@ -97,25 +97,29 @@ struct AllocatorListNode {
AllocatorListNode *next; AllocatorListNode *next;
}; };
struct AllocatorFreeList: IntrusiveList<AllocatorListNode> { typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
// Move at most max_count chunks to other_free_list.
void BulkAllocate(uptr max_count, AllocatorFreeList *other_free_list) { // Move at most max_count chunks from allocate_from to allocate_to.
CHECK(!empty()); // This function is better be a method of AllocatorFreeList, but we can't
CHECK(other_free_list->empty()); // inherit it from IntrusiveList as the ancient gcc complains about non-PODness.
if (size() <= max_count) { static inline void BulkMove(uptr max_count,
other_free_list->append_front(this); AllocatorFreeList *allocate_from,
CHECK(empty()); AllocatorFreeList *allocate_to) {
CHECK(!allocate_from->empty());
CHECK(allocate_to->empty());
if (allocate_from->size() <= max_count) {
allocate_to->append_front(allocate_from);
CHECK(allocate_from->empty());
} else { } else {
for (uptr i = 0; i < max_count; i++) { for (uptr i = 0; i < max_count; i++) {
AllocatorListNode *node = front(); AllocatorListNode *node = allocate_from->front();
pop_front(); allocate_from->pop_front();
other_free_list->push_front(node); allocate_to->push_front(node);
} }
CHECK(!empty()); CHECK(!allocate_from->empty());
} }
CHECK(!other_free_list->empty()); CHECK(!allocate_to->empty());
} }
};
// SizeClassAllocator64 -- allocator for 64-bit address space. // SizeClassAllocator64 -- allocator for 64-bit address space.
// //
@ -164,8 +168,7 @@ class SizeClassAllocator64 {
if (region->free_list.empty()) { if (region->free_list.empty()) {
PopulateFreeList(class_id, region); PopulateFreeList(class_id, region);
} }
region->free_list.BulkAllocate( BulkMove(SizeClassMap::MaxCached(class_id), &region->free_list, free_list);
SizeClassMap::MaxCached(class_id), free_list);
} }
// Swallow the entire free_list for the given class_id. // Swallow the entire free_list for the given class_id.
@ -371,7 +374,7 @@ class SizeClassAllocator32 {
SpinMutexLock l(&sci->mutex); SpinMutexLock l(&sci->mutex);
EnsureSizeClassHasAvailableChunks(sci, class_id); EnsureSizeClassHasAvailableChunks(sci, class_id);
CHECK(!sci->free_list.empty()); CHECK(!sci->free_list.empty());
sci->free_list.BulkAllocate(SizeClassMap::MaxCached(class_id), free_list); BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
} }
// Swallow the entire free_list for the given class_id. // Swallow the entire free_list for the given class_id.
@ -424,6 +427,7 @@ class SizeClassAllocator32 {
typedef SizeClassMap SizeClassMapT; typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 128 static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 128
private: private:
static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20; static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
static const uptr kRegionSize = 1 << kRegionSizeLog; static const uptr kRegionSize = 1 << kRegionSizeLog;