//===-- tsan_sync.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_sync.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"

namespace __tsan {
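
// Forward declaration: the definition lives elsewhere in the runtime. It is
// used below to register a newly created sync object with the deadlock
// detector.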
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);

SyncVar::SyncVar()
    : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
  Reset(0);
}
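
// Initializes a freshly allocated sync object for the memory location addr.
// Records the creation stack (not used by Go) and, when deadlock detection is
// enabled, registers the object with the deadlock detector.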
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
  this->addr = addr;
  this->uid = uid;
  this->next = 0;

  creation_stack_id = 0;
  if (!SANITIZER_GO)  // Go does not use them
    creation_stack_id = CurrentStackId(thr, pc);
  if (common_flags()->detect_deadlocks)
    DDMutexInit(thr, pc, this);
}
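
// Returns the object to its pristine state. When proc is null the vector
// clocks must already be empty (this is the constructor path); otherwise they
// are released back into the Processor's clock cache.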
void SyncVar::Reset(Processor *proc) {
  uid = 0;
  creation_stack_id = 0;
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
    CHECK_EQ(read_clock.size(), 0);
  } else {
    clock.Reset(&proc->clock_cache);
    read_clock.Reset(&proc->clock_cache);
  }
}

MetaMap::MetaMap() {
  atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
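
// Records a heap block of sz bytes at p: allocates an MBlock with the
// allocating thread and stack, and publishes its index in the meta shadow
// cell for p.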
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
  u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
  MBlock *b = block_alloc_.Map(idx);
  b->siz = sz;
  b->tid = thr->tid;
  b->stk = CurrentStackId(thr, pc);
  u32 *meta = MemToMeta(p);
  DCHECK_EQ(*meta, 0);
  *meta = idx | kFlagBlock;
}
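
// Frees the block starting at p together with any sync objects attached to
// the same range. Returns the block size rounded up to the meta shadow cell,
// or 0 if there is no block at p.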
uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
  MBlock* b = GetBlock(p);
  if (b == 0)
    return 0;
  uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
  FreeRange(proc, p, sz);
  return sz;
}
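
// Walks the meta shadow cells covering [p, p + sz) and releases every block
// and sync object chained off them. Returns true if anything was freed.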
bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
  bool has_something = false;
  u32 *meta = MemToMeta(p);
  u32 *end = MemToMeta(p + sz);
  if (end == meta)
    end++;
  for (; meta < end; meta++) {
    u32 idx = *meta;
    if (idx == 0) {
      // Note: don't write to meta in this case -- the block can be huge.
      continue;
    }
    *meta = 0;
    has_something = true;
    while (idx != 0) {
      if (idx & kFlagBlock) {
        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
        break;
      } else if (idx & kFlagSync) {
        DCHECK(idx & kFlagSync);
        SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
        u32 next = s->next;
        s->Reset(proc);
        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
        idx = next;
      } else {
        CHECK(0);
      }
    }
  }
  return has_something;
}

// ResetRange removes all meta objects from the range.
// It is called for large mmap-ed regions. The function is best-effort wrt
// freeing of meta objects, because we don't want to page in the whole range
// which can be huge. The function probes pages one-by-one until it finds a
// page without meta objects; at that point it stops freeing meta objects.
// Because thread stacks grow top-down, we do the same starting from the end
// as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
  if (SANITIZER_GO) {
    // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
    // so we do the optimization only for C/C++.
    FreeRange(proc, p, sz);
    return;
  }
  const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
  const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
  if (sz <= 4 * kPageSize) {
    // If the range is small, just do the normal free procedure.
    FreeRange(proc, p, sz);
    return;
  }
  // First, round both ends of the range to page size.
  uptr diff = RoundUp(p, kPageSize) - p;
  if (diff != 0) {
    FreeRange(proc, p, diff);
    p += diff;
    sz -= diff;
  }
  diff = p + sz - RoundDown(p + sz, kPageSize);
  if (diff != 0) {
    FreeRange(proc, p + sz - diff, diff);
    sz -= diff;
  }
  // Now we must have a non-empty page-aligned range.
  CHECK_GT(sz, 0);
  CHECK_EQ(p, RoundUp(p, kPageSize));
  CHECK_EQ(sz, RoundUp(sz, kPageSize));
  const uptr p0 = p;
  const uptr sz0 = sz;
  // Probe start of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p, kPageSize);
    p += kPageSize;
    sz -= kPageSize;
    if (!has_something && checked > (128 << 10))
      break;
  }
  // Probe end of the range.
  for (uptr checked = 0; sz > 0; checked += kPageSize) {
    bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
    sz -= kPageSize;
    // Stacks grow down, so sync objects are most likely at the end of the
    // region (if it is a stack). The very end of the stack is TLS and tsan
    // increases TLS by at least 256K, so check at least 512K.
    if (!has_something && checked > (512 << 10))
      break;
  }
  // Finally, page out the whole range (including the parts that we've just
  // freed). Note: we can't simply madvise, because we need to leave a zeroed
  // range (otherwise __tsan_java_move can crash if it encounters left-over
  // meta objects in the java heap).
  uptr metap = (uptr)MemToMeta(p0);
  uptr metasz = sz0 / kMetaRatio;
  UnmapOrDie((void*)metap, metasz);
  MmapFixedNoReserve(metap, metasz);
}
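
// Follows the chain in the meta shadow cell for p until it reaches the block
// descriptor. Returns 0 if no block is currently allocated at p.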
MBlock* MetaMap::GetBlock(uptr p) {
  u32 *meta = MemToMeta(p);
  u32 idx = *meta;
  for (;;) {
    if (idx == 0)
      return 0;
    if (idx & kFlagBlock)
      return block_alloc_.Map(idx & ~kFlagMask);
    DCHECK(idx & kFlagSync);
    SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
    idx = s->next;
  }
}
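
// Convenience wrappers around GetAndLock.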
SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                                     uptr addr, bool write_lock) {
  return GetAndLock(thr, pc, addr, write_lock, true);
}

SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
  return GetAndLock(0, 0, addr, write_lock, false);
}
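
// Looks up (and, if create is set, creates) the sync object for addr and
// returns it with its mutex locked. Insertion into the per-cell chain is
// lock-free: a freshly initialized SyncVar is prepended with a
// compare-and-swap and the operation is retried if the cell changed
// concurrently; a speculatively allocated object that turns out to be
// unneeded is returned to the cache.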
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
                             uptr addr, bool write_lock, bool create) {
  u32 *meta = MemToMeta(addr);
  u32 idx0 = *meta;
  u32 myidx = 0;
  SyncVar *mys = 0;
  for (;;) {
    u32 idx = idx0;
    for (;;) {
      if (idx == 0)
        break;
      if (idx & kFlagBlock)
        break;
      DCHECK(idx & kFlagSync);
      SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
      if (s->addr == addr) {
        if (myidx != 0) {
          mys->Reset(thr->proc());
          sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
        }
        if (write_lock)
          s->mtx.Lock();
        else
          s->mtx.ReadLock();
        return s;
      }
      idx = s->next;
    }
    if (!create)
      return 0;
    if (*meta != idx0) {
      idx0 = *meta;
      continue;
    }

    if (myidx == 0) {
      const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
      myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
      mys = sync_alloc_.Map(myidx);
      mys->Init(thr, pc, addr, uid);
    }
    mys->next = idx0;
    if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
        myidx | kFlagSync, memory_order_release)) {
      if (write_lock)
        mys->mtx.Lock();
      else
        mys->mtx.ReadLock();
      return mys;
    }
  }
}
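
// Relocates meta information when a range of application memory is moved
// (e.g. by __tsan_java_move): block and sync indices are transferred to the
// destination cells and sync object addresses are patched.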
void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
  // src and dst can overlap; there are no concurrent accesses to the regions
  // (e.g. stop-the-world).
  CHECK_NE(src, dst);
  CHECK_NE(sz, 0);
  uptr diff = dst - src;
  u32 *src_meta = MemToMeta(src);
  u32 *dst_meta = MemToMeta(dst);
  u32 *src_meta_end = MemToMeta(src + sz);
  uptr inc = 1;
  if (dst > src) {
    src_meta = MemToMeta(src + sz) - 1;
    dst_meta = MemToMeta(dst + sz) - 1;
    src_meta_end = MemToMeta(src) - 1;
    inc = -1;
  }
  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
    CHECK_EQ(*dst_meta, 0);
    u32 idx = *src_meta;
    *src_meta = 0;
    *dst_meta = idx;
    // Patch the addresses in sync objects.
    while (idx != 0) {
      if (idx & kFlagBlock)
        break;
      CHECK(idx & kFlagSync);
      SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
      s->addr += diff;
      idx = s->next;
    }
  }
}
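
// Called when a Processor goes idle: flushes its local allocator caches for
// blocks and sync objects back to the global pools.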
void MetaMap::OnProcIdle(Processor *proc) {
  block_alloc_.FlushCache(&proc->block_cache);
  sync_alloc_.FlushCache(&proc->sync_cache);
}

}  // namespace __tsan