[asan] fully re-implement the FakeStack (use-after-return) to make it faster and async-signal-safe. The implementation is not yet complete (see FIXMEs) but the existing tests pass.
llvm-svn: 190588
commit 729c8dc65b
parent 7fe6a5390f
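The speed and async-signal-safety claimed in the commit message come from the new allocation scheme: each size class keeps one byte-sized flag per frame, and Allocate() claims a frame by flipping that flag with an atomic compare-and-swap, scanning in round-robin order from a hint position. Below is a minimal standalone sketch of that scheme under assumptions; ToyFrameAllocator and its members are made-up names for illustration, plain std::atomic stands in for the sanitizer atomics, and this is not the ASan runtime API (the real code is in the diff that follows, using hint_position_, GetFlags(), and atomic_compare_exchange_strong).

// Standalone sketch of the flag-based, round-robin frame allocation technique.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

struct ToyFrameAllocator {
  static const uint64_t kNumFrames = 8;   // power of two, like NumberOfFrames()
  std::atomic<uint8_t> flags[kNumFrames]; // 0 = free, 1 = allocated
  uint64_t hint;                          // round-robin hint, like hint_position_

  ToyFrameAllocator() : hint(0) {
    for (uint64_t i = 0; i < kNumFrames; i++)
      flags[i].store(0, std::memory_order_relaxed);
  }

  // Returns a frame index, or kNumFrames on exhaustion. Shared state is only
  // touched through atomics, which is what makes the scheme async-signal-safe.
  uint64_t Allocate() {
    for (uint64_t i = 0; i < kNumFrames; i++) {
      uint64_t pos = (hint++) & (kNumFrames - 1);  // ModuloNumberOfFrames analogue
      uint8_t zero = 0;
      if (flags[pos].compare_exchange_strong(zero, 1, std::memory_order_acquire))
        return pos;
    }
    return kNumFrames;  // exhausted
  }

  void Deallocate(uint64_t pos) {
    assert(flags[pos].load() == 1);
    flags[pos].store(0, std::memory_order_release);
  }
};

int main() {
  ToyFrameAllocator a;
  uint64_t f1 = a.Allocate();
  uint64_t f2 = a.Allocate();
  std::printf("allocated frames %llu and %llu\n",
              (unsigned long long)f1, (unsigned long long)f2);
  a.Deallocate(f1);
  a.Deallocate(f2);
}

Because a freed frame's flag is only cleared, not reused immediately, the round-robin scan maximizes the time before a slot is handed out again, which is the "quarantine-like" behavior the header comment describes.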
@@ -17,164 +17,79 @@
namespace __asan {

bool FakeStack::AddrIsInSizeClass(uptr addr, uptr class_id) {
  uptr mem = allocated_size_classes_[class_id];
  uptr size = ClassMmapSize(class_id);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

uptr FakeStack::AddrIsInFakeStack(uptr addr) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    if (!AddrIsInSizeClass(addr, class_id)) continue;
    uptr size_class_first_ptr = allocated_size_classes_[class_id];
    uptr size = ClassSize(class_id);
    CHECK_LE(size_class_first_ptr, addr);
    CHECK_GT(size_class_first_ptr + ClassMmapSize(class_id), addr);
    return size_class_first_ptr + ((addr - size_class_first_ptr) / size) * size;

FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    if (flags[pos]) continue;
    u8 zero = 0;
    // FIXME: this does not have to be thread-safe, just async-signal-safe.
    if (atomic_compare_exchange_strong((atomic_uint8_t *)&flags[pos], &zero, 1,
                                       memory_order_acquire)) {
      FakeFrame *res = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log, class_id, pos));
      res->real_stack = real_stack;
      res->class_id = class_id;
      return res;
    }
  }
  CHECK(0 && "Failed to allocate a fake stack frame");
  return 0;
}

// We may want to compute this during compilation.
ALWAYS_INLINE uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
  uptr log = Log2(rounded_size);
  CHECK_LE(alloc_size, (1UL << log));
  CHECK_GT(alloc_size, (1UL << (log-1)));
  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK_LT(res, kNumberOfSizeClasses);
  CHECK_GE(ClassSize(res), rounded_size);
  return res;

void FakeStack::Deallocate(FakeFrame *ff, uptr stack_size_log, uptr class_id,
                           uptr real_stack) {
  u8 *base = GetFrame(stack_size_log, class_id, 0);
  u8 *cur = reinterpret_cast<u8 *>(ff);
  CHECK_LE(base, cur);
  CHECK_LT(cur, base + (1UL << stack_size_log));
  uptr pos = (cur - base) >> (kMinStackFrameSizeLog + class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  CHECK_EQ(flags[pos], 1);
  flags[pos] = 0;
}

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }

uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (1UL << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  return base + pos * BytesInSizeClass(class_id);
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(uptr stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    uptr mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      UnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

uptr FakeStack::ClassMmapSize(uptr class_id) {
  // Limit allocation size to ClassSize * MaxDepth when running with unlimited
  // stack.
  return RoundUpTo(Min(ClassSize(class_id) * kMaxRecursionDepth, stack_size_),
                   GetPageSizeCached());
}

void FakeStack::AllocateOneSizeClass(uptr class_id) {
  CHECK(ClassMmapSize(class_id) >= GetPageSizeCached());
  uptr new_mem = (uptr)MmapOrDie(
      ClassMmapSize(class_id), __FUNCTION__);
  if (0) {
    Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
           GetCurrentThread()->tid(),
           class_id, new_mem, new_mem + ClassMmapSize(class_id),
           ClassMmapSize(class_id));
  }
  uptr i;
  uptr size = ClassSize(class_id);
  for (i = 0; i + size <= ClassMmapSize(class_id); i += size) {
    size_classes_[class_id].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK_LE(i, ClassMmapSize(class_id));
  allocated_size_classes_[class_id] = new_mem;
}

ALWAYS_INLINE uptr
FakeStack::AllocateStack(uptr class_id, uptr size, uptr real_stack) {
  CHECK(size <= kMaxStackMallocSize && size > 1);
  if (!alive_) return real_stack;
  if (!allocated_size_classes_[class_id]) {
    AllocateOneSizeClass(class_id);
  }
  FakeFrame *fake_frame = size_classes_[class_id].FifoPop();
  CHECK(fake_frame);
  fake_frame->class_id = class_id;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uptr ptr = (uptr)fake_frame;

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
  AsanThread *t = GetCurrentThread();
  if (!t) return real_stack;
  FakeStack *fs = t->fake_stack();
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  uptr ptr = reinterpret_cast<uptr>(ff);
  PoisonShadow(ptr, size, 0);
  return ptr;
}

ALWAYS_INLINE void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  uptr class_id = static_cast<uptr>(fake_frame->class_id);
  CHECK(allocated_size_classes_[class_id]);
  uptr ptr = (uptr)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, class_id));
  size_classes_[class_id].FifoPush(fake_frame);
}

ALWAYS_INLINE void FakeStack::OnFree(uptr ptr, uptr class_id, uptr size,
                                     uptr real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK_EQ(fake_frame->magic, kRetiredStackFrameMagic);
  CHECK_NE(fake_frame->descr, 0);
  CHECK_EQ(fake_frame->class_id, class_id);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return real_stack;
  AsanThread *t = GetCurrentThread();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  t->LazyInitFakeStack();
  uptr ptr = t->fake_stack()->AllocateStack(class_id, size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, class_id, size, real_stack);
  }
  if (ptr == real_stack)
    return;
  AsanThread *t = GetCurrentThread();
  if (!t) return;
  FakeStack *fs = t->fake_stack();
  FakeFrame *ff = reinterpret_cast<FakeFrame *>(ptr);
  fs->Deallocate(ff, fs->stack_size_log(), class_id, real_stack);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

} // namespace __asan
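To make the new AddrIsInFakeStack arithmetic above concrete, here is a small offset-only sketch of the same computation. FrameBeginOffset is a hypothetical helper, not ASan code; it assumes stack_size_log = 15 and works on offsets relative to GetFrame(stack_size_log, 0, 0) instead of raw pointers.

// Sketch of the pointer-to-frame mapping used by the new AddrIsInFakeStack.
#include <cstdint>
#include <cstdio>

static const uint64_t kMinStackFrameSizeLog = 6;  // 64-byte minimum frame

// Given an offset from the first frame, return the offset of the beginning
// of the fake frame that contains it.
uint64_t FrameBeginOffset(uint64_t offset, uint64_t stack_size_log) {
  uint64_t class_id = offset >> stack_size_log;        // which size-class region
  uint64_t base = class_id << stack_size_log;          // start of that region
  uint64_t frame_size_log = kMinStackFrameSizeLog + class_id;
  uint64_t pos = (offset - base) >> frame_size_log;    // frame index in the region
  return base + (pos << frame_size_log);
}

int main() {
  // With stack_size_log = 15, class 0 occupies offsets [0, 32K), class 1
  // [32K, 64K), and so on. An address 1000 bytes into the class-1 region
  // (128-byte frames) lies in frame 7, which starts at 32768 + 7 * 128 = 33664.
  std::printf("%llu\n",
              (unsigned long long)FrameBeginOffset((1u << 15) + 1000, 15));
}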
@@ -9,12 +9,14 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_fake_stack.cc
// ASan-private header for asan_fake_stack.cc, implements FakeStack.
//===----------------------------------------------------------------------===//

#ifndef ASAN_FAKE_STACK_H
#define ASAN_FAKE_STACK_H

#include "sanitizer_common/sanitizer_common.h"

namespace __asan {

// Fake stack frame contains local variables of one function.

@@ -22,99 +24,144 @@ struct FakeFrame {
  uptr magic;  // Modified by the instrumented code.
  uptr descr;  // Modified by the instrumented code.
  uptr pc;     // Modified by the instrumented code.
  u64 real_stack : 48;
  u64 class_id : 16;
  // End of the first 32 bytes.
  // The rest should not be used when the frame is active.
  FakeFrame *next;
};

struct FakeFrameFifo {
 public:
  void FifoPush(FakeFrame *node);
  FakeFrame *FifoPop();
 private:
  FakeFrame *first_, *last_;
};

template<uptr kMaxNumberOfFrames>
class FakeFrameLifo {
 public:
  explicit FakeFrameLifo(LinkerInitialized) {}
  FakeFrameLifo() : n_frames_(0) {}
  void LifoPush(FakeFrame *node) {
    CHECK_LT(n_frames_, kMaxNumberOfFrames);
    frames_[n_frames_++] = node;
  }
  void LifoPop() {
    CHECK(n_frames_);
    n_frames_--;
  }
  FakeFrame *top() {
    if (n_frames_ == 0)
      return 0;
    return frames_[n_frames_ - 1];
  }
 private:
  uptr n_frames_;
  FakeFrame *frames_[kMaxNumberOfFrames];
};

// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator so that when a function exits the fake stack
// is not poped but remains there for quite some time until gets used again.
// is not popped but remains there for quite some time until it gets used again.
// So, we poison the objects on the fake stack when function returns.
// It helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a lifo list of all current fake frames and update it on every
// call to __asan_stack_malloc.
//
// The FakeStack object is allocated by a single mmap call and has no other
// pointers. The size of the fake stack depends on the actual thread stack size
// and thus can not be a constant.
// stack_size is a power of two greater than or equal to the thread's stack
// size; we store it as its logarithm (stack_size_log).
// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
// is a power of two, starting from 64 bytes. Each size class occupies
// stack_size bytes and thus can allocate
// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
// For each size class we have NumberOfFrames allocation flags,
// each flag indicates whether the given frame is currently allocated.
// All flags for size classes 0 .. 10 are stored in a single contiguous region
// followed by another contiguous region which contains the actual memory for
// size classes. The addresses are computed by GetFlags and GetFrame without
// any memory accesses, solely based on 'this' and stack_size_log.
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round-robin fashion to maximize the delay between a deallocation
// and the next allocation.
//
// FIXME: don't lazy init the FakeStack (not async-signal safe).
// FIXME: handle throw/longjmp/clone, i.e. garbage collect the unwound frames.
// FIXME: use low bits of the pointer to store stack_size_log_ (performance).
class FakeStack {
  static const uptr kMinStackFrameSizeLog = 6;   // Min frame is 64B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.

 public:
  void Init(uptr stack_size);
  void StopUsingFakeStack() { alive_ = false; }
  void Cleanup();
  uptr AllocateStack(uptr class_id, uptr size, uptr real_stack);
  static void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack);
  // Return the bottom of the mapped region.
  static const uptr kNumberOfSizeClasses =
      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;

  // CTOR: create the FakeStack as a single mmap-ed object.
  static FakeStack *Create(uptr stack_size_log) {
    if (stack_size_log < 15)
      stack_size_log = 15;
    FakeStack *res = reinterpret_cast<FakeStack *>(
        MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
    res->stack_size_log_ = stack_size_log;
    return res;
  }

  void Destroy() {
    UnmapOrDie(this, RequiredSize(stack_size_log_));
  }

  // stack_size_log is at least 15 (stack_size >= 32K).
  static uptr SizeRequiredForFlags(uptr stack_size_log) {
    return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
  }

  // Each size class occupies stack_size bytes.
  static uptr SizeRequiredForFrames(uptr stack_size_log) {
    return (1ULL << stack_size_log) * kNumberOfSizeClasses;
  }

  // Number of bytes required for the whole object.
  static uptr RequiredSize(uptr stack_size_log) {
    return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
           SizeRequiredForFrames(stack_size_log);
  }

  // Offset of the given flag from the first flag.
  // The flags for class 0 begin at offset 000000000
  // The flags for class 1 begin at offset 100000000
  // ....................2................ 110000000
  // ....................3................ 111000000
  // and so on.
  static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
    uptr t = kNumberOfSizeClasses - 1 - class_id;
    const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
    return ((all_ones >> t) << t) << (stack_size_log - 15);
  }

  static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
    return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
  }

  // Divide n by the number of frames in the size class.
  static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
    return n & (NumberOfFrames(stack_size_log, class_id) - 1);
  }

  // The pointer to the flags of the given class_id.
  u8 *GetFlags(uptr stack_size_log, uptr class_id) {
    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
           FlagsOffset(stack_size_log, class_id);
  }

  // Get frame by class_id and pos.
  u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
           SizeRequiredForFlags(stack_size_log) +
           (1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
  }

  // Allocate the fake frame.
  FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);

  // Deallocate the fake frame.
  void Deallocate(FakeFrame *ff, uptr stack_size_log, uptr class_id,
                  uptr real_stack);

  // Poison the entire FakeStack's shadow with the magic value.
  void PoisonAll(u8 magic);

  // Return the beginning of the FakeFrame or 0 if the address is not ours.
  uptr AddrIsInFakeStack(uptr addr);
  uptr StackSize() const { return stack_size_; }

  static uptr ComputeSizeClass(uptr alloc_size);

  static uptr ClassSize(uptr class_id) {
  // Number of bytes in a fake frame of this size class.
  static uptr BytesInSizeClass(uptr class_id) {
    return 1UL << (class_id + kMinStackFrameSizeLog);
  }

  uptr stack_size_log() const { return stack_size_log_; }

 private:
  static const uptr kMinStackFrameSizeLog = 6;   // Min frame is 64B.
  static const uptr kMaxStackFrameSizeLog = 16;  // Max stack frame is 64K.
  static const uptr kNumberOfSizeClasses =
      kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
  FakeStack() { }
  static const uptr kFlagsOffset = 4096;  // This is where the flags begin.
  // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
  COMPILER_CHECK(kNumberOfSizeClasses == 11);
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kMaxRecursionDepth = 60000;

  bool AddrIsInSizeClass(uptr addr, uptr class_id);

  // Each size class should be large enough to hold all frames.
  uptr ClassMmapSize(uptr class_id);

  void DeallocateFrame(FakeFrame *fake_frame);

  void AllocateOneSizeClass(uptr class_id);

  uptr stack_size_;
  bool alive_;

  uptr allocated_size_classes_[kNumberOfSizeClasses];
  FakeFrameFifo size_classes_[kNumberOfSizeClasses];
  FakeFrameLifo<kMaxRecursionDepth> call_stack_;
  uptr hint_position_[kNumberOfSizeClasses];
  uptr stack_size_log_;
};

COMPILER_CHECK(sizeof(FakeStack) <= (1 << 19));

} // namespace __asan

#endif  // ASAN_FAKE_STACK_H
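The layout formulas above can be sanity-checked by hand. With stack_size_log = 15: SizeRequiredForFlags = 2^(15+1-6) = 1024, SizeRequiredForFrames = 2^15 * 11 = 360448, so RequiredSize = 4096 + 1024 + 360448 = 365568, which matches the new unit test added later in this commit. Below is a short sketch that recomputes these values outside the class; the free functions are hypothetical standalone copies of the header's formulas, kept in sync by hand, not part of the ASan sources.

// Recomputation of the FakeStack layout formulas from the header above.
#include <cassert>
#include <cstdint>

static const uint64_t kMinStackFrameSizeLog = 6;
static const uint64_t kMaxStackFrameSizeLog = 16;
static const uint64_t kNumberOfSizeClasses =
    kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;  // 11
static const uint64_t kFlagsOffset = 4096;

uint64_t SizeRequiredForFlags(uint64_t stack_size_log) {
  return 1ULL << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
uint64_t SizeRequiredForFrames(uint64_t stack_size_log) {
  return (1ULL << stack_size_log) * kNumberOfSizeClasses;
}
uint64_t RequiredSize(uint64_t stack_size_log) {
  return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
         SizeRequiredForFrames(stack_size_log);
}

int main() {
  assert(SizeRequiredForFlags(10) == (1 << 5));  // matches TEST(FakeStack, FlagsSize)
  assert(RequiredSize(15) == 365568);            // matches TEST(FakeStack, RequiredSize)
  assert(RequiredSize(16) == 727040);
}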
@@ -525,14 +525,6 @@ class ScopedInErrorReport {
    reporting_thread_tid = GetCurrentTidOrInvalid();
    Printf("===================================================="
           "=============\n");
    if (reporting_thread_tid != kInvalidTid) {
      // We started reporting an error message. Stop using the fake stack
      // in case we call an instrumented function from a symbolizer.
      AsanThread *curr_thread = GetCurrentThread();
      CHECK(curr_thread);
      if (curr_thread->fake_stack())
        curr_thread->fake_stack()->StopUsingFakeStack();
    }
  }
  // Destructor is NORETURN, as functions that report errors are.
  NORETURN ~ScopedInErrorReport() {
@@ -75,17 +75,17 @@ class AsanThread {
    return addr >= stack_bottom_ && addr < stack_top_;
  }

  void LazyInitFakeStack() {
    if (fake_stack_) return;
    fake_stack_ = (FakeStack*)MmapOrDie(sizeof(FakeStack), "FakeStack");
    fake_stack_->Init(stack_size());
  }
  void DeleteFakeStack() {
    if (!fake_stack_) return;
    fake_stack_->Cleanup();
    UnmapOrDie(fake_stack_, sizeof(FakeStack));
    fake_stack_->PoisonAll(0);
    fake_stack_->Destroy();
  }

  FakeStack *fake_stack() {
    if (!fake_stack_)  // FIXME: lazy init is not async-signal safe.
      fake_stack_ = FakeStack::Create(Log2(RoundUpToPowerOfTwo(stack_size())));
    return fake_stack_;
  }
  FakeStack *fake_stack() { return fake_stack_; }

  AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
  AsanStats &stats() { return stats_; }
@@ -128,6 +128,7 @@ add_custom_target(AsanBenchmarks)
set_target_properties(AsanBenchmarks PROPERTIES FOLDER "Asan benchmarks")

set(ASAN_NOINST_TEST_SOURCES
  asan_fake_stack_test.cc
  asan_noinst_test.cc
  asan_test_main.cc)
set(ASAN_INST_TEST_SOURCES
@@ -0,0 +1,154 @@
//===-- asan_fake_stack_test.cc -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Tests for FakeStack.
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_fake_stack.h"
#include "asan_test_utils.h"
#include "sanitizer_common/sanitizer_common.h"

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>

#include <set>

namespace __asan {

TEST(FakeStack, FlagsSize) {
  EXPECT_EQ(FakeStack::SizeRequiredForFlags(10), 1 << 5);
  EXPECT_EQ(FakeStack::SizeRequiredForFlags(11), 1 << 6);
  EXPECT_EQ(FakeStack::SizeRequiredForFlags(20), 1 << 15);
}

TEST(FakeStack, RequiredSize) {
  // for (int i = 15; i < 20; i++) {
  //   uptr alloc_size = FakeStack::RequiredSize(i);
  //   printf("%zdK ==> %zd\n", 1 << (i - 10), alloc_size);
  // }
  EXPECT_EQ(FakeStack::RequiredSize(15), 365568);
  EXPECT_EQ(FakeStack::RequiredSize(16), 727040);
  EXPECT_EQ(FakeStack::RequiredSize(17), 1449984);
  EXPECT_EQ(FakeStack::RequiredSize(18), 2895872);
  EXPECT_EQ(FakeStack::RequiredSize(19), 5787648);
}

TEST(FakeStack, FlagsOffset) {
  for (uptr stack_size_log = 15; stack_size_log <= 20; stack_size_log++) {
    uptr stack_size = 1UL << stack_size_log;
    uptr offset = 0;
    for (uptr class_id = 0; class_id < FakeStack::kNumberOfSizeClasses;
         class_id++) {
      uptr frame_size = FakeStack::BytesInSizeClass(class_id);
      uptr num_flags = stack_size / frame_size;
      EXPECT_EQ(offset, FakeStack::FlagsOffset(stack_size_log, class_id));
      // printf("%zd: %zd => %zd %zd\n", stack_size_log, class_id, offset,
      //        FakeStack::FlagsOffset(stack_size_log, class_id));
      offset += num_flags;
    }
  }
}

TEST(FakeStack, CreateDestroy) {
  for (int i = 0; i < 1000; i++) {
    for (uptr stack_size_log = 20; stack_size_log <= 22; stack_size_log++) {
      FakeStack *fake_stack = FakeStack::Create(stack_size_log);
      fake_stack->Destroy();
    }
  }
}

TEST(FakeStack, ModuloNumberOfFrames) {
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, 0), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<15)), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<10)), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<9)), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<8)), 1<<8);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 0, (1<<15) + 1), 1);

  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 0), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 1<<9), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 1<<8), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 1, 1<<7), 1<<7);

  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 0), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 1), 1);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 15), 15);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 16), 0);
  EXPECT_EQ(FakeStack::ModuloNumberOfFrames(15, 5, 17), 1);
}

TEST(FakeStack, GetFrame) {
  const uptr stack_size_log = 20;
  const uptr stack_size = 1 << stack_size_log;
  FakeStack *fs = FakeStack::Create(stack_size_log);
  u8 *base = fs->GetFrame(stack_size_log, 0, 0);
  EXPECT_EQ(base, reinterpret_cast<u8 *>(fs) +
                      fs->SizeRequiredForFlags(stack_size_log) + 4096);
  EXPECT_EQ(base + 0*stack_size + 64 * 7, fs->GetFrame(stack_size_log, 0, 7));
  EXPECT_EQ(base + 1*stack_size + 128 * 3, fs->GetFrame(stack_size_log, 1, 3));
  EXPECT_EQ(base + 2*stack_size + 256 * 5, fs->GetFrame(stack_size_log, 2, 5));
  fs->Destroy();
}

TEST(FakeStack, Allocate) {
  const uptr stack_size_log = 19;
  FakeStack *fs = FakeStack::Create(stack_size_log);
  std::set<FakeFrame *> s;
  for (int iter = 0; iter < 2; iter++) {
    s.clear();
    for (uptr cid = 0; cid < FakeStack::kNumberOfSizeClasses; cid++) {
      uptr n = FakeStack::NumberOfFrames(stack_size_log, cid);
      uptr bytes_in_class = FakeStack::BytesInSizeClass(cid);
      for (uptr j = 0; j < n; j++) {
        FakeFrame *ff = fs->Allocate(stack_size_log, cid, 0);
        uptr x = reinterpret_cast<uptr>(ff);
        EXPECT_TRUE(s.insert(ff).second);
        EXPECT_EQ(x, fs->AddrIsInFakeStack(x));
        EXPECT_EQ(x, fs->AddrIsInFakeStack(x + 1));
        EXPECT_EQ(x, fs->AddrIsInFakeStack(x + bytes_in_class - 1));
        EXPECT_NE(x, fs->AddrIsInFakeStack(x + bytes_in_class));
      }
      if (iter == 0 &&
          (cid == 0 || cid == FakeStack::kNumberOfSizeClasses - 1)) {
        // This is slow, so we do it only sometimes.
        EXPECT_DEATH(fs->Allocate(stack_size_log, cid, 0),
                     "Failed to allocate a fake stack frame");
      }
    }
    for (std::set<FakeFrame *>::iterator it = s.begin(); it != s.end(); ++it) {
      FakeFrame *ff = *it;
      fs->Deallocate(ff, stack_size_log, ff->class_id, 0);
    }
  }
  fs->Destroy();
}

static void RecursiveFunction(FakeStack *fs, int depth) {
  uptr class_id = depth / 3;
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, 0);
  if (depth) {
    RecursiveFunction(fs, depth - 1);
    RecursiveFunction(fs, depth - 1);
  }
  fs->Deallocate(ff, fs->stack_size_log(), class_id, 0);
}

TEST(FakeStack, RecursiveStressTest) {
  const uptr stack_size_log = 16;
  FakeStack *fs = FakeStack::Create(stack_size_log);
  RecursiveFunction(fs, 22);  // with 26 runs for 2-3 seconds.
  fs->Destroy();
}

} // namespace __asan