forked from OSchip/llvm-project

parent 6e2557769c
commit c9e12aa323
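The diff below applies one mechanical change across the TSan runtime: the Context *CTX() accessor (and the file-static pointer behind it) is removed, and callers now reach the runtime state through a single global Context *ctx, defined next to ctx_placeholder in the runtime and declared extern in the header hunk that touches struct Context. The sketch that follows only illustrates that before/after pattern; the Context members and helper names in it are placeholders, not the real TSan definitions.

// Before: the context hides behind a file-static pointer plus an accessor,
// so every use pays a call (or relies on the accessor being inlined).
struct Context { bool initialized; };

namespace before {
static Context *ctx;                 // visible only in this translation unit
Context *CTX() { return ctx; }       // accessor exported to other files
bool Ready() { return CTX() && CTX()->initialized; }
}

// After: one global, declared extern in the shared header and defined once;
// callers dereference it directly.
namespace after {
Context *ctx;                        // single definition in the runtime
bool Ready() { return ctx && ctx->initialized; }
}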
@@ -65,7 +65,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
       CHECK_NE(s, &fdctx.globsync);
       CHECK_NE(s, &fdctx.filesync);
       CHECK_NE(s, &fdctx.socksync);
-      SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
+      SyncVar *v = ctx->synctab.GetAndRemove(thr, pc, (uptr)s);
       if (v)
         DestroyAndFree(v);
       internal_free(s);
@@ -20,7 +20,7 @@
 namespace __tsan {
 
 Flags *flags() {
-  return &CTX()->flags;
+  return &ctx->flags;
 }
 
 // Can be overriden in frontend.
@@ -849,7 +849,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
 TSAN_INTERCEPTOR(int, pthread_create,
     void *th, void *attr, void *(*callback)(void*), void * param) {
   SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
-  if (CTX()->after_multithreaded_fork) {
+  if (ctx->after_multithreaded_fork) {
     if (flags()->die_after_fork) {
       Printf("ThreadSanitizer: starting new threads after muti-threaded"
           " fork is not supported. Dying (set die_after_fork=0 to override)\n");
@@ -1672,7 +1672,6 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool sigact,
   // from rtl_generic_sighandler) we have not yet received the reraised
   // signal; and it looks too fragile to intercept all ways to reraise a signal.
   if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
-    Context *ctx = CTX();
     __tsan::StackTrace stack;
     stack.ObtainCurrent(thr, pc);
     ThreadRegistryLock l(ctx->thread_registry);
@@ -1998,7 +1997,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
 
 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
-  CTX()->thread_registry->SetThreadNameByUserId(thread, name)
+  __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
 
 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
 
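One hunk above is not a plain CTX()-to-ctx substitution: COMMON_INTERCEPTOR_SET_PTHREAD_NAME expands inside common interceptor code where a local variable also named ctx (the per-call interceptor context, visible in the TsanInterceptorContext cast on the surrounding lines) is in scope, which is presumably why the replacement spells the global as __tsan::ctx so that name lookup lands on the runtime context rather than that local. Below is a minimal, self-contained illustration of why the qualification matters; the names here are invented for the example.

#include <cstdio>

namespace rt {                        // stand-in for the __tsan namespace
struct Context { const char *tag; };
Context global{"runtime context"};
Context *ctx = &global;               // the global the macro should reach
}
using namespace rt;                   // mirrors the interceptor file's using-directive

struct InterceptorCtx { const char *tag; };

// Unqualified "ctx" binds to the nearest declaration at the expansion site;
// "rt::ctx" always names the global.
#define TAG_UNQUALIFIED() (ctx->tag)
#define TAG_QUALIFIED()   (rt::ctx->tag)

int main() {
  InterceptorCtx local{"interceptor context"};
  InterceptorCtx *ctx = &local;            // this local hides rt::ctx inside main
  std::printf("%s\n", TAG_UNQUALIFIED());  // prints "interceptor context"
  std::printf("%s\n", TAG_QUALIFIED());    // prints "runtime context"
  return 0;
}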
@@ -305,7 +305,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
   while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
     ExpectRace *race = dyn_ann_ctx->expect.next;
     if (race->hitcount == 0) {
-      CTX()->nmissed_expected++;
+      ctx->nmissed_expected++;
       ReportMissedExpectedRace(race);
     }
     race->prev->next = race->next;
@@ -289,7 +289,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
     MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
     return NoTsanAtomicLoad(a, mo);
   }
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
   AcquireImpl(thr, pc, &s->clock);
   T v = NoTsanAtomicLoad(a, mo);
   s->mtx.ReadUnlock();
@@ -321,7 +321,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     return;
   }
   __sync_synchronize();
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -335,7 +335,7 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = 0;
   if (mo != mo_relaxed) {
-    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
     TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -456,7 +456,7 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = 0;
   if (mo != mo_relaxed) {
-    s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
     TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -132,7 +132,7 @@ SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
   }
   if (s == 0 && create) {
     DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
-    s = CTX()->synctab.Create(thr, pc, addr);
+    s = ctx->synctab.Create(thr, pc, addr);
     s->next = b->head;
     b->head = s;
   }
@@ -90,7 +90,6 @@ void AllocatorPrintStats() {
 static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
     return;
-  Context *ctx = CTX();
   StackTrace stack;
   stack.ObtainCurrent(thr, pc);
   ThreadRegistryLock l(ctx->thread_registry);
@@ -109,7 +108,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
     return 0;
   MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
   b->Init(sz, thr->tid, CurrentStackId(thr, pc));
-  if (CTX() && CTX()->initialized) {
+  if (ctx && ctx->initialized) {
     if (thr->ignore_reads_and_writes == 0)
       MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
     else
@@ -136,7 +135,7 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
     }
     b->ListReset();
   }
-  if (CTX() && CTX()->initialized) {
+  if (ctx && ctx->initialized) {
     if (thr->ignore_reads_and_writes == 0)
       MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
   }
@@ -180,7 +179,6 @@ MBlock *user_mblock(ThreadState *thr, void *p) {
 }
 
 void invoke_malloc_hook(void *ptr, uptr size) {
-  Context *ctx = CTX();
   ThreadState *thr = cur_thread();
   if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
     return;
@@ -188,7 +186,6 @@ void invoke_malloc_hook(void *ptr, uptr size) {
 }
 
 void invoke_free_hook(void *ptr) {
-  Context *ctx = CTX();
   ThreadState *thr = cur_thread();
   if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
     return;
@@ -37,6 +37,7 @@ namespace __tsan {
 THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
 #endif
 static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
 
 // Can be overriden by a front-end.
 #ifdef TSAN_EXTERNAL_HOOKS
@@ -51,11 +52,6 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void WEAK OnInitialize() {}
 #endif
 
-static Context *ctx;
-Context *CTX() {
-  return ctx;
-}
-
 static char thread_registry_placeholder[sizeof(ThreadRegistry)];
 
 static ThreadContextBase *CreateThreadContext(u32 tid) {
@@ -118,7 +114,6 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
 }
 
 static void BackgroundThread(void *arg) {
-  Context *ctx = CTX();
   // This is a non-initialized non-user thread, nothing to see here.
   ScopedIgnoreInterceptors ignore;
   const u64 kMs2Ns = 1000 * 1000;
@@ -324,19 +319,16 @@ int Finalize(ThreadState *thr) {
 
 #ifndef TSAN_GO
 void ForkBefore(ThreadState *thr, uptr pc) {
-  Context *ctx = CTX();
   ctx->report_mtx.Lock();
   ctx->thread_registry->Lock();
 }
 
 void ForkParentAfter(ThreadState *thr, uptr pc) {
-  Context *ctx = CTX();
   ctx->thread_registry->Unlock();
   ctx->report_mtx.Unlock();
 }
 
 void ForkChildAfter(ThreadState *thr, uptr pc) {
-  Context *ctx = CTX();
   ctx->thread_registry->Unlock();
   ctx->report_mtx.Unlock();
 
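In the @@ -37,6 +37,7 hunk above, the new definition Context *ctx; sits right next to static char ctx_placeholder[sizeof(Context)] ALIGNED(64);, which suggests the context object is constructed into that static buffer (for example with placement new during runtime start-up) rather than allocated on the heap. The initialization code itself is not part of this diff, so the following is only a sketch of that pattern, with made-up names.

#include <new>   // for placement new

struct Context { bool initialized = false; };

// Static storage that is large enough and suitably aligned for a Context.
alignas(64) static char ctx_placeholder[sizeof(Context)];
Context *ctx;    // the global the rest of the runtime dereferences

void InitializeSketch() {
  // Build the context inside the preallocated buffer; no heap allocation,
  // so this can run before the runtime's own allocator is usable.
  ctx = new (ctx_placeholder) Context;
  ctx->initialized = true;
}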
@@ -471,8 +471,6 @@ struct ThreadState {
                        uptr tls_addr, uptr tls_size);
 };
 
-Context *CTX();
-
 #ifndef TSAN_GO
 extern THREADLOCAL char cur_thread_placeholder[];
 INLINE ThreadState *cur_thread() {
@@ -552,6 +550,8 @@ struct Context {
   u64 int_alloc_siz[MBlockTypeCount];
 };
 
+extern Context *ctx;  // The one and the only global runtime context.
+
 struct ScopedIgnoreInterceptors {
   ScopedIgnoreInterceptors() {
 #ifndef TSAN_GO
@@ -584,7 +584,6 @@ class ScopedReport {
   const ReportDesc *GetReport() const;
 
  private:
-  Context *ctx_;
   ReportDesc *rep_;
   // Symbolizer makes lots of intercepted calls. If we try to process them,
   // at best it will cause deadlocks on internal mutexes.
@@ -43,13 +43,12 @@ struct Callback : DDCallback {
 
 void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
   Callback cb(thr, pc);
-  CTX()->dd->MutexInit(&cb, &s->dd);
+  ctx->dd->MutexInit(&cb, &s->dd);
   s->dd.ctx = s->GetId();
 }
 
 void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                  bool rw, bool recursive, bool linker_init) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexCreate);
   if (!linker_init && IsAppMem(addr)) {
@@ -66,7 +65,6 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
 }
 
 void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexDestroy);
 #ifndef TSAN_GO
@@ -109,7 +107,6 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
 }
 
 void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
   CHECK_GT(rec, 0);
   if (IsAppMem(addr))
@@ -150,7 +147,6 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
 }
 
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -196,7 +192,6 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
 }
 
 void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexReadLock);
   if (IsAppMem(addr))
@@ -226,7 +221,6 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
 }
 
 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   StatInc(thr, StatMutexReadUnlock);
   if (IsAppMem(addr))
@@ -253,7 +247,6 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
 }
 
 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -298,7 +291,6 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
 }
 
 void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
-  Context *ctx = CTX();
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
   SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   s->owner_tid = SyncVar::kInvalidTid;
@@ -310,7 +302,7 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
   AcquireImpl(thr, pc, &s->clock);
   s->mtx.ReadUnlock();
 }
@@ -328,8 +320,8 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
   DPrintf("#%d: AcquireGlobal\n", thr->tid);
   if (thr->ignore_sync)
     return;
-  ThreadRegistryLock l(CTX()->thread_registry);
-  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+  ThreadRegistryLock l(ctx->thread_registry);
+  ctx->thread_registry->RunCallbackForEachThreadLocked(
       UpdateClockCallback, thr);
 }
 
@@ -337,7 +329,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -349,7 +341,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
-  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
   // Can't increment epoch w/o writing to the trace as well.
   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
@@ -372,8 +364,8 @@ void AfterSleep(ThreadState *thr, uptr pc) {
   if (thr->ignore_sync)
     return;
   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
-  ThreadRegistryLock l(CTX()->thread_registry);
-  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+  ThreadRegistryLock l(ctx->thread_registry);
+  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
 }
 #endif
@@ -417,7 +409,6 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
   if (r == 0)
     return;
-  Context *ctx = CTX();
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeDeadlock);
   for (int i = 0; i < r->n; i++)
@@ -148,18 +148,17 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) {
 }
 
 ScopedReport::ScopedReport(ReportType typ) {
-  ctx_ = CTX();
-  ctx_->thread_registry->CheckLocked();
+  ctx->thread_registry->CheckLocked();
   void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
   rep_ = new(mem) ReportDesc;
   rep_->typ = typ;
-  ctx_->report_mtx.Lock();
+  ctx->report_mtx.Lock();
   CommonSanitizerReportMutex.Lock();
 }
 
 ScopedReport::~ScopedReport() {
   CommonSanitizerReportMutex.Unlock();
-  ctx_->report_mtx.Unlock();
+  ctx->report_mtx.Unlock();
   DestroyAndFree(rep_);
 }
 
@@ -206,7 +205,6 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
 
 #ifndef TSAN_GO
 static ThreadContext *FindThreadByUidLocked(int unique_id) {
-  Context *ctx = CTX();
   ctx->thread_registry->CheckLocked();
   for (unsigned i = 0; i < kMaxTid; i++) {
     ThreadContext *tctx = static_cast<ThreadContext*>(
@@ -219,7 +217,6 @@ static ThreadContext *FindThreadByUidLocked(int unique_id) {
 }
 
 static ThreadContext *FindThreadByTidLocked(int tid) {
-  Context *ctx = CTX();
   ctx->thread_registry->CheckLocked();
   return static_cast<ThreadContext*>(
       ctx->thread_registry->GetThreadLocked(tid));
@@ -237,7 +234,6 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
 }
 
 ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
-  Context *ctx = CTX();
   ctx->thread_registry->CheckLocked();
   ThreadContext *tctx = static_cast<ThreadContext*>(
       ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
@@ -269,7 +265,7 @@ u64 ScopedReport::AddMutex(u64 id) {
   u64 uid = 0;
   u64 mid = id;
   uptr addr = SyncVar::SplitId(id, &uid);
-  SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
+  SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false);
   // Check that the mutex is still alive.
   // Another mutex can be created at the same address,
   // so check uid as well.
@@ -374,7 +370,6 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
   // trace part, and then replaying the trace till the given epoch.
-  Context *ctx = CTX();
   ctx->thread_registry->CheckLocked();
   ThreadContext *tctx = static_cast<ThreadContext*>(
       ctx->thread_registry->GetThreadLocked(tid));
@@ -439,7 +434,6 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
 
 static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
     uptr addr_min, uptr addr_max) {
-  Context *ctx = CTX();
   bool equal_stack = false;
   RacyStacks hash;
   if (flags()->suppress_equal_stacks) {
@@ -479,7 +473,6 @@ static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
 
 static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
     uptr addr_min, uptr addr_max) {
-  Context *ctx = CTX();
   if (flags()->suppress_equal_stacks) {
     RacyStacks hash;
     hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
@@ -584,7 +577,7 @@ static bool IsJavaNonsense(const ReportDesc *rep) {
           && frame->module == 0)) {
         if (frame) {
           FiredSuppression supp = {rep->typ, frame->pc, 0};
-          CTX()->fired_suppressions.push_back(supp);
+          ctx->fired_suppressions.push_back(supp);
         }
         return true;
       }
@@ -638,7 +631,6 @@ void ReportRace(ThreadState *thr) {
       return;
   }
 
-  Context *ctx = CTX();
   ThreadRegistryLock l0(ctx->thread_registry);
 
   ReportType typ = ReportTypeRace;
@@ -79,7 +79,6 @@ struct OnStartedArgs {
 };
 
 void ThreadContext::OnStarted(void *arg) {
-  Context *ctx = CTX();
   OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
   thr = args->thr;
   // RoundUp so that one trace part does not contain events
@@ -123,7 +122,6 @@ void ThreadContext::OnStarted(void *arg) {
 }
 
 void ThreadContext::OnFinished() {
-  Context *ctx = CTX();
   if (!detached) {
     thr->fast_state.IncrementEpoch();
     // Can't increment epoch w/o writing to the trace as well.
@@ -185,7 +183,7 @@ static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
 }
 
 static void ThreadCheckIgnore(ThreadState *thr) {
-  if (CTX()->after_multithreaded_fork)
+  if (ctx->after_multithreaded_fork)
     return;
   if (thr->ignore_reads_and_writes)
     ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
@@ -201,21 +199,20 @@ void ThreadFinalize(ThreadState *thr) {
 #ifndef TSAN_GO
   if (!flags()->report_thread_leaks)
     return;
-  ThreadRegistryLock l(CTX()->thread_registry);
+  ThreadRegistryLock l(ctx->thread_registry);
   Vector<ThreadLeak> leaks(MBlockScopedBuf);
-  CTX()->thread_registry->RunCallbackForEachThreadLocked(
+  ctx->thread_registry->RunCallbackForEachThreadLocked(
       MaybeReportThreadLeak, &leaks);
   for (uptr i = 0; i < leaks.Size(); i++) {
     ScopedReport rep(ReportTypeThreadLeak);
     rep.AddThread(leaks[i].tctx);
     rep.SetCount(leaks[i].count);
-    OutputReport(CTX(), rep);
+    OutputReport(ctx, rep);
   }
 #endif
 }
 
 int ThreadCount(ThreadState *thr) {
-  Context *ctx = CTX();
   uptr result;
   ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
   return (int)result;
@@ -223,7 +220,6 @@ int ThreadCount(ThreadState *thr) {
 
 int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
   StatInc(thr, StatThreadCreate);
-  Context *ctx = CTX();
   OnCreatedArgs args = { thr, pc };
   int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
   DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
@@ -232,7 +228,6 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
 }
 
 void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
-  Context *ctx = CTX();
   uptr stk_addr = 0;
   uptr stk_size = 0;
   uptr tls_addr = 0;
@@ -283,7 +278,6 @@ void ThreadFinish(ThreadState *thr) {
   if (thr->tls_addr && thr->tls_size)
     DontNeedShadowFor(thr->tls_addr, thr->tls_size);
   thr->is_alive = false;
-  Context *ctx = CTX();
   ctx->thread_registry->FinishThread(thr->tid);
 }
 
@@ -297,7 +291,6 @@ static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
 }
 
 int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
-  Context *ctx = CTX();
   int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
   DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
   return res;
@@ -307,19 +300,17 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
   CHECK_GT(tid, 0);
   CHECK_LT(tid, kMaxTid);
   DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
-  Context *ctx = CTX();
   ctx->thread_registry->JoinThread(tid, thr);
 }
 
 void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
   CHECK_GT(tid, 0);
   CHECK_LT(tid, kMaxTid);
-  Context *ctx = CTX();
   ctx->thread_registry->DetachThread(tid);
 }
 
 void ThreadSetName(ThreadState *thr, const char *name) {
-  CTX()->thread_registry->SetThreadName(thr->tid, name);
+  ctx->thread_registry->SetThreadName(thr->tid, name);
 }
 
 void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,