libsanitizer merge from upstream r196090

From-SVN: r205695
This commit is contained in:
Kostya Serebryany 2013-12-05 09:18:38 +00:00 committed by Kostya Serebryany
parent 649d196dbd
commit df77f0e4ec
130 changed files with 4538 additions and 1642 deletions

View File

@ -1,3 +1,8 @@
2013-12-05 Kostya Serebryany <kcc@google.com>
* c-c++-common/asan/null-deref-1.c: Update the test
to match the fresh asan run-time.
2013-12-05 Richard Biener <rguenther@suse.de>
PR tree-optimization/59374

View File

@ -18,6 +18,5 @@ int main()
/* { dg-output "ERROR: AddressSanitizer:? SEGV on unknown address\[^\n\r]*" } */
/* { dg-output "0x\[0-9a-f\]+ \[^\n\r]*pc 0x\[0-9a-f\]+\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*AddressSanitizer can not provide additional info.*(\n|\r\n|\r)" } */
/* { dg-output " #0 0x\[0-9a-f\]+ (in \[^\n\r]*NullDeref\[^\n\r]* (\[^\n\r]*null-deref-1.c:10|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output " #1 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*null-deref-1.c:15|\[^\n\r]*:0)|\[(\])\[^\n\r]*(\n|\r\n|\r)" } */

View File

@ -1,3 +1,13 @@
2013-12-05 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r196090.
* tsan/Makefile.am (tsan_files): Added new files.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
* sanitizer_common/Makefile.in: Regenerate.
* lsan/Makefile.am (lsan_files): Added new files.
* lsan/Makefile.in: Regenerate.
2013-11-29 Jakub Jelinek <jakub@redhat.com>
Yury Gribov <y.gribov@samsung.com>

View File

@ -1,4 +1,4 @@
191666
196090
The first line of this file holds the svn revision number of the
last merge done from the master library sources.

View File

@ -33,10 +33,11 @@ void InitializeAllocator();
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
bool IsValid() { return chunk_ != 0; }
uptr Beg(); // first byte of user memory.
uptr End(); // last byte of user memory.
uptr UsedSize(); // size requested by the user.
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
void GetAllocStack(StackTrace *stack);
@ -88,16 +89,12 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
};
struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
{ }
AsanThreadLocalMallocStorage() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
AsanThreadLocalMallocStorage() {}
};
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
@ -112,7 +109,7 @@ void *asan_pvalloc(uptr size, StackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();

View File

@ -92,7 +92,7 @@ AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
static Allocator allocator;
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 8UL << 30);
FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
@ -184,14 +184,19 @@ COMPILER_CHECK(kChunkHeader2Size <= 16);
struct AsanChunk: ChunkBase {
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
uptr UsedSize() {
uptr UsedSize(bool locked_version = false) {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
return *reinterpret_cast<uptr *>(
allocator.GetMetaData(AllocBeg(locked_version)));
}
void *AllocBeg() {
if (from_memalign)
void *AllocBeg(bool locked_version = false) {
if (from_memalign) {
if (locked_version)
return allocator.GetBlockBeginFastLocked(
reinterpret_cast<void *>(this));
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
// If we don't use stack depot, we store the alloc/free stack traces
@ -211,11 +216,14 @@ struct AsanChunk: ChunkBase {
uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
return (available - kChunkHeader2Size) / sizeof(u32);
}
bool AddrIsInside(uptr addr) {
return (addr >= Beg()) && (addr < Beg() + UsedSize());
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
};
// A chunk view is valid only when it points at a chunk whose state is not
// CHUNK_AVAILABLE, i.e. the chunk is currently allocated or quarantined;
// available chunks carry stale metadata and must not be described.
bool AsanChunkView::IsValid() {
return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
@ -226,25 +234,16 @@ static void GetStackTraceFromId(u32 id, StackTrace *stack) {
CHECK(id);
uptr size = 0;
const uptr *trace = StackDepotGet(id, &size);
CHECK_LT(size, kStackTraceMax);
internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
stack->size = size;
CHECK(trace);
stack->CopyFrom(trace, size);
}
void AsanChunkView::GetAllocStack(StackTrace *stack) {
if (flags()->use_stack_depot)
GetStackTraceFromId(chunk_->alloc_context_id, stack);
else
StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
chunk_->AllocStackSize());
GetStackTraceFromId(chunk_->alloc_context_id, stack);
}
void AsanChunkView::GetFreeStack(StackTrace *stack) {
if (flags()->use_stack_depot)
GetStackTraceFromId(chunk_->free_context_id, stack);
else
StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
chunk_->FreeStackSize());
GetStackTraceFromId(chunk_->free_context_id, stack);
}
struct QuarantineCallback;
@ -390,12 +389,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
meta[1] = chunk_beg;
}
if (fl.use_stack_depot) {
m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
} else {
m->alloc_context_id = 0;
StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
}
m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
@ -404,7 +398,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
// Deal with the end of the region if size is not aligned to granularity.
if (size != size_rounded_down_to_granularity && fl.poison_heap) {
u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
*shadow = size & (SHADOW_GRANULARITY - 1);
*shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
}
AsanStats &thread_stats = GetCurrentThreadStats();
@ -463,12 +457,7 @@ static void QuarantineChunk(AsanChunk *m, void *ptr,
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
if (flags()->use_stack_depot) {
m->free_context_id = StackDepotPut(stack->trace, stack->size);
} else {
m->free_context_id = 0;
StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
}
m->free_context_id = StackDepotPut(stack->trace, stack->size);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
@ -673,12 +662,13 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
CHECK(stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
if (ptr == 0) return 0;
uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
if (flags()->check_malloc_usable_size && (usable_size == 0))
ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
GET_STACK_TRACE_FATAL(pc, bp);
ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
}
return usable_size;
}
@ -718,7 +708,8 @@ uptr PointsIntoChunk(void* p) {
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
m->AddrIsInside(addr, /*locked_version=*/true))
return chunk;
return 0;
}
@ -751,7 +742,7 @@ void LsanMetadata::set_tag(ChunkTag value) {
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->UsedSize();
return m->UsedSize(/*locked_version=*/true);
}
u32 LsanMetadata::stack_trace_id() const {

View File

@ -130,6 +130,8 @@ extern "C" {
}
}
WRAP_V_V(__asan_handle_no_return)
WRAP_V_W(__asan_report_store1)
WRAP_V_W(__asan_report_store2)
WRAP_V_W(__asan_report_store4)

View File

@ -43,7 +43,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
FakeStack *res = reinterpret_cast<FakeStack *>(
MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
res->stack_size_log_ = stack_size_log;
if (flags()->verbosity) {
if (common_flags()->verbosity) {
u8 *p = reinterpret_cast<u8 *>(res);
Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
GetCurrentTidOrInvalid(), p,
@ -132,6 +132,20 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
needs_gc_ = false;
}
// Invoke |callback| on the [begin, end) address range of every currently
// allocated fake frame, across all size classes; |arg| is passed through
// unchanged to the callback.
void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr cid = 0; cid < kNumberOfSizeClasses; cid++) {
    u8 *allocated = GetFlags(stack_size_log(), cid);
    uptr frame_count = NumberOfFrames(stack_size_log(), cid);
    for (uptr idx = 0; idx < frame_count; idx++) {
      if (allocated[idx] == 0) continue;  // Frame is free; nothing to report.
      FakeFrame *frame = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), cid, idx));
      uptr frame_beg = reinterpret_cast<uptr>(frame);
      callback(frame_beg, frame_beg + FakeStack::BytesInSizeClass(cid), arg);
    }
  }
}
#if SANITIZER_LINUX && !SANITIZER_ANDROID
static THREADLOCAL FakeStack *fake_stack_tls;

View File

@ -146,6 +146,8 @@ class FakeStack {
void HandleNoReturn();
void GC(uptr real_stack);
void ForEachFakeFrame(RangeIteratorCallback callback, void *arg);
private:
FakeStack() { }
static const uptr kFlagsOffset = 4096; // This is were the flags begin.

View File

@ -30,8 +30,6 @@ struct Flags {
// Lower value may reduce memory usage but increase the chance of
// false negatives.
int quarantine_size;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// Size (in bytes) of redzones around heap objects.
// Requirement: redzone >= 32, is a power of two.
int redzone;
@ -83,6 +81,9 @@ struct Flags {
bool print_legend;
// If set, prints ASan exit stats even after program terminates successfully.
bool atexit;
// If set, coverage information will be dumped at shutdown time if the
// appropriate instrumentation was enabled.
bool coverage;
// By default, disable core dumper on 64-bit - it makes little sense
// to dump 16T+ core.
bool disable_core;
@ -96,10 +97,11 @@ struct Flags {
// Poison (or not) the heap memory on [de]allocation. Zero value is useful
// for benchmarking the allocator or instrumentator.
bool poison_heap;
// If true, poison partially addressable 8-byte aligned words (default=true).
// This flag affects heap and global buffers, but not stack buffers.
bool poison_partial;
// Report errors on malloc/delete, new/free, new/delete[], etc.
bool alloc_dealloc_mismatch;
// Use stack depot instead of storing stacks in the redzones.
bool use_stack_depot;
// If true, assume that memcmp(p1, p2, n) always reads n bytes before
// comparing p1 and p2.
bool strict_memcmp;

View File

@ -92,15 +92,13 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l =
(ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals));
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
if (g->has_dynamic_init) {
if (dynamic_init_globals == 0) {
void *mem = allocator_for_globals.Allocate(sizeof(VectorOfGlobals));
dynamic_init_globals = new(mem)
dynamic_init_globals = new(allocator_for_globals)
VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
}
DynInitGlobal dyn_global = { *g, false };

View File

@ -92,6 +92,11 @@ void SetThreadName(const char *name) {
asanThreadRegistry().SetThreadName(t->tid(), name);
}
// Hook invoked by the common interceptors on process exit (wired up via
// COMMON_INTERCEPTOR_ON_EXIT below).  Currently always reports success.
int OnExit() {
// FIXME: ask frontend whether we need to return failure.
return 0;
}
} // namespace __asan
// ---------------------- Wrappers ---------------- {{{1
@ -100,6 +105,19 @@ using namespace __asan; // NOLINT
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \
do { \
if ((!INTERCEPT_FUNCTION(name) || !REAL(name)) && \
common_flags()->verbosity > 0) \
Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
do { \
} while (false)
@ -124,16 +142,28 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
do { \
} while (false)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name)
// But asan does not remember UserId's for threads (pthread_t);
// and remembers all ever existed threads, so the linear search by UserId
// can be slow.
#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \
(void)(p); \
(void)(s); \
} while (false)
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
do { \
(void)(p); \
(void)(s); \
} while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
@ -144,8 +174,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
extern "C" int pthread_attr_getdetachstate(void *attr, int *v);
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
@ -155,7 +183,7 @@ INTERCEPTOR(int, pthread_create, void *thread,
GET_STACK_TRACE_THREAD;
int detached = 0;
if (attr != 0)
pthread_attr_getdetachstate(attr, &detached);
REAL(pthread_attr_getdetachstate)(attr, &detached);
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(start_routine, arg);
@ -256,7 +284,7 @@ static void MlockIsUnsupported() {
static bool printed = false;
if (printed) return;
printed = true;
if (flags()->verbosity > 0) {
if (common_flags()->verbosity > 0) {
Printf("INFO: AddressSanitizer ignores "
"mlock/mlockall/munlock/munlockall\n");
}
@ -645,16 +673,6 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
}
#endif // ASAN_INTERCEPT___CXA_ATEXIT
#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) do { \
if (!INTERCEPT_FUNCTION(name) && flags()->verbosity > 0) \
Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
@ -767,7 +785,7 @@ void InitializeAsanInterceptors() {
InitializeWindowsInterceptors();
#endif
if (flags()->verbosity > 0) {
if (common_flags()->verbosity > 0) {
Report("AddressSanitizer: libc interceptors initialized\n");
}
}

View File

@ -96,6 +96,7 @@ void StopInitOrderChecking();
void AsanTSDInit(void (*destructor)(void *tsd));
void *AsanTSDGet();
void AsanTSDSet(void *tsd);
void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
@ -133,6 +134,7 @@ const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;
const int kAsanContiguousContainerOOBMagic = 0xfc;
const int kAsanStackUseAfterScopeMagic = 0xf8;
const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe;

View File

@ -56,6 +56,12 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
# elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
# elif defined(__x86_64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];

View File

@ -172,7 +172,7 @@ void MaybeReexec() {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
if (flags()->verbosity >= 1) {
if (common_flags()->verbosity >= 1) {
Report("exec()-ing the program with\n");
Report("%s=%s\n", kDyldInsertLibraries, new_env);
Report("to enable ASan wrappers.\n");
@ -309,7 +309,7 @@ extern "C"
void asan_dispatch_call_block_and_release(void *block) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
if (flags()->verbosity >= 2) {
if (common_flags()->verbosity >= 2) {
Report("asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
@ -344,7 +344,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
dispatch_function_t func) { \
GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (flags()->verbosity >= 2) { \
if (common_flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \
@ -362,7 +362,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (flags()->verbosity >= 2) {
if (common_flags()->verbosity >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
@ -375,7 +375,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (flags()->verbosity >= 2) {
if (common_flags()->verbosity >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();

View File

@ -103,8 +103,9 @@ INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
ALIAS("memalign");
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
}
// We avoid including malloc.h for portability reasons.

View File

@ -96,8 +96,9 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
SANITIZER_INTERFACE_ATTRIBUTE
size_t _msize(void *ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
}
int _CrtDbgReport(int, const char*, int,

View File

@ -12,6 +12,7 @@
#include "asan_poisoning.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@ -66,7 +67,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
if (flags()->verbosity >= 1) {
if (common_flags()->verbosity >= 1) {
Printf("Trying to poison memory region [%p, %p)\n",
(void*)beg_addr, (void*)end_addr);
}
@ -108,7 +109,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
if (flags()->verbosity >= 1) {
if (common_flags()->verbosity >= 1) {
Printf("Trying to unpoison memory region [%p, %p)\n",
(void*)beg_addr, (void*)end_addr);
}
@ -242,13 +243,57 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
if (flags()->verbosity > 0)
if (common_flags()->verbosity > 0)
Report("poisoning: %p %zx\n", (void*)addr, size);
PoisonAlignedStackMemory(addr, size, true);
}
void __asan_unpoison_stack_memory(uptr addr, uptr size) {
if (flags()->verbosity > 0)
if (common_flags()->verbosity > 0)
Report("unpoisoning: %p %zx\n", (void*)addr, size);
PoisonAlignedStackMemory(addr, size, false);
}
// Annotates a contiguous container buffer: the storage [beg, end) stays
// allocated, but only the prefix [beg, mid) holds live elements.  This call
// moves the boundary from old_mid to new_mid by repoisoning shadow memory so
// that [beg, new_mid) is addressable and touching [new_mid, end) produces a
// container-overflow report (kAsanContiguousContainerOOBMagic).
void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
if (common_flags()->verbosity >= 2)
Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
uptr beg = reinterpret_cast<uptr>(beg_p);
uptr end= reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
uptr granularity = SHADOW_GRANULARITY;
// Both mids must lie inside [beg, end]; only beg is required to be
// shadow-granularity aligned (end need not be).
CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
IsAligned(beg, granularity));
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
uptr d1 = RoundDownTo(old_mid, granularity);
uptr d2 = RoundUpTo(old_mid, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
// Make a quick sanity check that we are indeed in this state.
if (d1 != d2)
CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
CHECK_EQ(*(u8*)MemToShadow(a), 0);
if (d2 + granularity <= c && c <= end)
CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_mid, granularity);
uptr b2 = RoundUpTo(new_mid, granularity);
// New state:
// [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
PoisonShadow(a, b1 - a, 0);
PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
if (b1 != b2) {
// new_mid falls inside a single shadow granule: record how many leading
// bytes of that granule are addressable, as partial poisoning does.
CHECK_EQ(b2 - b1, granularity);
*(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
}
}

View File

@ -41,6 +41,7 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
DCHECK(flags()->poison_heap);
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
if (i + SHADOW_GRANULARITY <= size) {
@ -49,7 +50,7 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
*shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
// first size-i bytes are addressable
*shadow = static_cast<u8>(size - i);
*shadow = poison_partial ? static_cast<u8>(size - i) : 0;
}
}
}

View File

@ -1,4 +1,4 @@
//===-- asan_linux.cc -----------------------------------------------------===//
//===-- asan_posix.cc -----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@ -42,7 +42,7 @@ static void MaybeInstallSigaction(int signum,
sigact.sa_flags = SA_SIGINFO;
if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
if (flags()->verbosity >= 1) {
if (common_flags()->verbosity >= 1) {
Report("Installed the sigaction for signal %d\n", signum);
}
}
@ -69,7 +69,7 @@ void SetAlternateSignalStack() {
altstack.ss_flags = 0;
altstack.ss_size = kAltStackSize;
CHECK_EQ(0, sigaltstack(&altstack, 0));
if (flags()->verbosity > 0) {
if (common_flags()->verbosity > 0) {
Report("Alternative stack for T%d set: [%p,%p)\n",
GetCurrentTidOrInvalid(),
altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
@ -114,6 +114,15 @@ void AsanTSDSet(void *tsd) {
pthread_setspecific(tsd_key, tsd);
}
// pthread TSD destructor for the ASan per-thread context.  POSIX runs TSD
// destructors in multiple passes; while destructor_iterations is above one
// we decrement it and re-arm the key so this destructor fires again on a
// later pass, delaying the real teardown as long as possible.  Only on the
// final iteration is AsanThread::TSDDtor actually invoked.
void PlatformTSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
if (context->destructor_iterations > 1) {
context->destructor_iterations--;
// Setting a non-null value schedules another destructor pass for this key.
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
return;
}
AsanThread::TSDDtor(tsd);
}
} // namespace __asan
#endif // SANITIZER_LINUX || SANITIZER_MAC

View File

@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@ -71,6 +72,7 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
case kAsanInitializationOrderMagic:
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
@ -117,19 +119,21 @@ static void PrintLegend() {
for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
PrintShadowByte("", i, " ");
Printf("\n");
PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
PrintShadowByte(" Contiguous container OOB:",
kAsanContiguousContainerOOBMagic);
PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
}
static void PrintShadowMemoryForAddress(uptr addr) {
@ -178,8 +182,8 @@ static bool IsASCII(unsigned char c) {
static const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use an heuristic
// approach to check if the name should be demangled.
return (name[0] == '_' && name[1] == 'Z' && &getSymbolizer)
? getSymbolizer()->Demangle(name)
return (name[0] == '_' && name[1] == 'Z')
? Symbolizer::Get()->Demangle(name)
: name;
}
@ -412,7 +416,11 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
void DescribeHeapAddress(uptr addr, uptr access_size) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return;
if (!chunk.IsValid()) {
Printf("AddressSanitizer can not describe address in more detail "
"(wild memory access suspected).\n");
return;
}
DescribeAccessToHeapChunk(chunk, addr, access_size);
CHECK(chunk.AllocTid() != kInvalidTid);
asanThreadRegistry().CheckLocked();
@ -479,7 +487,9 @@ void DescribeThread(AsanThreadContext *context) {
context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid,
tname, sizeof(tname)));
PrintStack(&context->stack);
uptr stack_size;
const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
PrintStack(stack_trace, stack_size);
// Recursively described parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
@ -540,22 +550,6 @@ class ScopedInErrorReport {
}
};
static void ReportSummary(const char *error_type, StackTrace *stack) {
if (!stack->size) return;
if (&getSymbolizer && getSymbolizer()->IsAvailable()) {
AddressInfo ai;
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
getSymbolizer()->SymbolizeCode(pc, &ai, 1);
ReportErrorSummary(error_type,
StripPathPrefix(ai.file,
common_flags()->strip_path_prefix),
ai.line, ai.function);
}
// FIXME: do we need to print anything at all if there is no symbolizer?
}
void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
ScopedInErrorReport in_report;
Decorator d;
@ -565,13 +559,13 @@ void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
(void*)addr, (void*)pc, (void*)sp, (void*)bp,
GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
Printf("AddressSanitizer can not provide additional info.\n");
GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
ReportSummary("SEGV", &stack);
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary("SEGV", &stack);
}
void ReportDoubleFree(uptr addr, StackTrace *stack) {
void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@ -581,14 +575,15 @@ void ReportDoubleFree(uptr addr, StackTrace *stack) {
"thread T%d%s:\n",
addr, curr_tid,
ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
PrintStack(stack);
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
PrintStack(&stack);
DescribeHeapAddress(addr, 1);
ReportSummary("double-free", stack);
ReportErrorSummary("double-free", &stack);
}
void ReportFreeNotMalloced(uptr addr, StackTrace *stack) {
void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
@ -598,12 +593,14 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *stack) {
"which was not malloc()-ed: %p in thread T%d%s\n", addr,
curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
PrintStack(stack);
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
PrintStack(&stack);
DescribeHeapAddress(addr, 1);
ReportSummary("bad-free", stack);
ReportErrorSummary("bad-free", &stack);
}
void ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
static const char *alloc_names[] =
@ -617,9 +614,11 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
Printf("%s", d.EndWarning());
PrintStack(stack);
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
PrintStack(&stack);
DescribeHeapAddress(addr, 1);
ReportSummary("alloc-dealloc-mismatch", stack);
ReportErrorSummary("alloc-dealloc-mismatch", &stack);
Report("HINT: if you don't care about these warnings you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
@ -634,7 +633,7 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
ReportSummary("bad-malloc_usable_size", stack);
ReportErrorSummary("bad-malloc_usable_size", stack);
}
void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
@ -647,7 +646,7 @@ void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
ReportSummary("bad-__asan_get_allocated_size", stack);
ReportErrorSummary("bad-__asan_get_allocated_size", stack);
}
void ReportStringFunctionMemoryRangesOverlap(
@ -665,7 +664,7 @@ void ReportStringFunctionMemoryRangesOverlap(
PrintStack(stack);
DescribeAddress((uptr)offset1, length1);
DescribeAddress((uptr)offset2, length2);
ReportSummary(bug_type, stack);
ReportErrorSummary(bug_type, stack);
}
// ----------------------- Mac-specific reports ----------------- {{{1
@ -747,6 +746,9 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
case kAsanUserPoisonedMemoryMagic:
bug_descr = "use-after-poison";
break;
case kAsanContiguousContainerOOBMagic:
bug_descr = "container-overflow";
break;
case kAsanStackUseAfterScopeMagic:
bug_descr = "stack-use-after-scope";
break;
@ -775,7 +777,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
PrintStack(&stack);
DescribeAddress(addr, access_size);
ReportSummary(bug_descr, &stack);
ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr);
}

View File

@ -31,9 +31,9 @@ void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type);
void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,

View File

@ -49,6 +49,8 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
}
}
if (flags()->coverage)
__sanitizer_cov_dump();
if (death_callback)
death_callback();
if (flags()->abort_on_error)
@ -86,11 +88,11 @@ static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
}
static void ParseFlagsFromString(Flags *f, const char *str) {
ParseCommonFlagsFromString(str);
CHECK((uptr)common_flags()->malloc_context_size <= kStackTraceMax);
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
ParseFlag(str, &f->quarantine_size, "quarantine_size");
ParseFlag(str, &f->verbosity, "verbosity");
ParseFlag(str, &f->redzone, "redzone");
CHECK_GE(f->redzone, 16);
CHECK(IsPowerOfTwo(f->redzone));
@ -119,32 +121,25 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->print_stats, "print_stats");
ParseFlag(str, &f->print_legend, "print_legend");
ParseFlag(str, &f->atexit, "atexit");
ParseFlag(str, &f->coverage, "coverage");
ParseFlag(str, &f->disable_core, "disable_core");
ParseFlag(str, &f->allow_reexec, "allow_reexec");
ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
ParseFlag(str, &f->poison_heap, "poison_heap");
ParseFlag(str, &f->poison_partial, "poison_partial");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
ParseFlag(str, &f->strict_init_order, "strict_init_order");
}
void InitializeFlags(Flags *f, const char *env) {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf->symbolize = true;
cf->malloc_context_size = kDefaultMallocContextSize;
cf->fast_unwind_on_fatal = false;
cf->fast_unwind_on_malloc = true;
cf->strip_path_prefix = "";
cf->handle_ioctl = false;
cf->log_path = 0;
cf->detect_leaks = false;
cf->leak_check_at_exit = true;
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->verbosity = 0;
f->redzone = 16;
f->debug = false;
f->report_globals = 1;
@ -168,14 +163,15 @@ void InitializeFlags(Flags *f, const char *env) {
f->print_stats = false;
f->print_legend = true;
f->atexit = false;
f->coverage = false;
f->disable_core = (SANITIZER_WORDSIZE == 64);
f->allow_reexec = true;
f->print_full_thread_history = true;
f->poison_heap = true;
f->poison_partial = true;
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// TODO(glider,timurrrr): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
f->use_stack_depot = true;
f->strict_memcmp = true;
f->strict_init_order = false;
@ -184,7 +180,7 @@ void InitializeFlags(Flags *f, const char *env) {
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
if (flags()->verbosity) {
if (cf->verbosity) {
Report("Using the defaults from __asan_default_options: %s\n",
MaybeCallAsanDefaultOptions());
}
@ -200,10 +196,10 @@ void InitializeFlags(Flags *f, const char *env) {
}
#endif
if (cf->detect_leaks && !f->use_stack_depot) {
Report("%s: detect_leaks is ignored (requires use_stack_depot).\n",
SanitizerToolName);
cf->detect_leaks = false;
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
f->check_initialization_order = true;
}
}
@ -462,7 +458,7 @@ void __asan_init() {
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
if (flags()->verbosity && options) {
if (common_flags()->verbosity && options) {
Report("Parsed ASAN_OPTIONS: %s\n", options);
}
@ -472,11 +468,6 @@ void __asan_init() {
// Setup internal allocator callback.
SetLowLevelAllocateCallback(OnLowLevelAllocate);
if (flags()->atexit) {
Atexit(asan_atexit);
}
// interceptors
InitializeAsanInterceptors();
ReplaceSystemMalloc();
@ -495,7 +486,7 @@ void __asan_init() {
}
#endif
if (flags()->verbosity)
if (common_flags()->verbosity)
PrintAddressSpaceLayout();
if (flags()->disable_core) {
@ -531,17 +522,18 @@ void __asan_init() {
Die();
}
AsanTSDInit(PlatformTSDDtor);
InstallSignalHandlers();
AsanTSDInit(AsanThread::TSDDtor);
// Allocator should be initialized before starting external symbolizer, as
// fork() on Mac locks the allocator.
InitializeAllocator();
// Start symbolizer process if necessary.
if (common_flags()->symbolize && &getSymbolizer) {
getSymbolizer()
->InitializeExternal(common_flags()->external_symbolizer_path);
if (common_flags()->symbolize) {
Symbolizer::Init(common_flags()->external_symbolizer_path);
} else {
Symbolizer::Disable();
}
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
@ -549,6 +541,13 @@ void __asan_init() {
asan_inited = 1;
asan_init_is_running = false;
if (flags()->atexit)
Atexit(asan_atexit);
if (flags()->coverage)
Atexit(__sanitizer_cov_dump);
// interceptors
InitTlsSize();
// Create main thread.
@ -568,7 +567,7 @@ void __asan_init() {
}
#endif // CAN_SANITIZE_LEAKS
if (flags()->verbosity) {
if (common_flags()->verbosity) {
Report("AddressSanitizer Init done\n");
}
}

View File

@ -22,9 +22,12 @@ static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
: false;
}
void PrintStack(const uptr *trace, uptr size) {
StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
}
void PrintStack(StackTrace *stack) {
stack->PrintStack(stack->trace, stack->size, common_flags()->symbolize,
common_flags()->strip_path_prefix, MaybeCallAsanSymbolize);
PrintStack(stack->trace, stack->size);
}
} // namespace __asan

View File

@ -20,6 +20,7 @@
namespace __asan {
void PrintStack(StackTrace *stack);
void PrintStack(const uptr *trace, uptr size);
} // namespace __asan
@ -29,19 +30,24 @@ void PrintStack(StackTrace *stack);
#if SANITIZER_WINDOWS
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp, 0, 0, fast)
stack.Unwind(max_s, pc, bp, 0, 0, fast)
#else
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
{ \
AsanThread *t; \
stack.size = 0; \
if (asan_inited && (t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_top = t->stack_top(); \
uptr stack_bottom = t->stack_bottom(); \
ScopedUnwinding unwind_scope(t); \
GetStackTrace(&stack, max_s, pc, bp, stack_top, stack_bottom, fast); \
} \
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
{ \
AsanThread *t; \
stack.size = 0; \
if (asan_inited) { \
if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_top = t->stack_top(); \
uptr stack_bottom = t->stack_bottom(); \
ScopedUnwinding unwind_scope(t); \
stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
} else if (t == 0 && !fast) { \
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
stack.Unwind(max_s, pc, bp, 0, 0, false); \
} \
} \
}
#endif // SANITIZER_WINDOWS

View File

@ -45,9 +45,9 @@ struct AsanStats {
uptr malloc_large;
uptr malloc_small_slow;
// Ctor for global AsanStats (accumulated stats and main thread stats).
// Ctor for global AsanStats (accumulated stats for dead threads).
explicit AsanStats(LinkerInitialized) { }
// Default ctor for thread-local stats.
// Creates empty stats.
AsanStats();
void Print(); // Prints formatted stats to stderr.

View File

@ -17,6 +17,7 @@
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "lsan/lsan_common.h"
namespace __asan {
@ -25,9 +26,8 @@ namespace __asan {
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack) {
internal_memcpy(&stack, args->stack, sizeof(stack));
}
if (args->stack)
stack_id = StackDepotPut(args->stack->trace, args->stack->size);
thread = args->thread;
thread->set_context(this);
}
@ -41,9 +41,12 @@ void AsanThreadContext::OnFinished() {
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;
static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(AsanThreadContext), "AsanThreadContext");
return new(mem) AsanThreadContext(tid);
BlockingMutexLock lock(&mu_for_thread_context);
return new(allocator_for_thread_context) AsanThreadContext(tid);
}
ThreadRegistry &asanThreadRegistry() {
@ -76,24 +79,25 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
thread->context_ = 0;
return thread;
}
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
if (flags()->verbosity >= 1)
if (common_flags()->verbosity >= 1)
Report("T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
if (flags()->verbosity >= 1) {
if (common_flags()->verbosity >= 1) {
Report("T%d exited\n", tid());
}
malloc_storage().CommitBack();
if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
asanThreadRegistry().FinishThread(tid());
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
@ -136,7 +140,7 @@ void AsanThread::Init() {
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
if (flags()->verbosity >= 1) {
if (common_flags()->verbosity >= 1) {
int local = 0;
Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
tid(), (void*)stack_bottom_, (void*)stack_top_,
@ -160,10 +164,14 @@ thread_return_t AsanThread::ThreadStart(uptr os_id) {
}
thread_return_t res = start_routine_(arg_);
malloc_storage().CommitBack();
if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
this->Destroy();
// On POSIX systems we defer this to the TSD destructor. LSan will consider
// the thread's memory as non-live from the moment we call Destroy(), even
// though that memory might contain pointers to heap objects which will be
// cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
// the TSD destructors have run might cause false positives in LSan.
if (!SANITIZER_POSIX)
this->Destroy();
return res;
}
@ -257,7 +265,7 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
if (flags()->verbosity >= 2) {
if (common_flags()->verbosity >= 2) {
Report("SetCurrentThread: %p for thread %p\n",
t->context(), (void*)GetThreadSelf());
}
@ -286,6 +294,13 @@ void EnsureMainThreadIDIsCorrect() {
if (context && (context->tid == 0))
context->os_id = GetTid();
}
__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return 0;
return context->thread;
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
@ -293,10 +308,7 @@ namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
__asan::AsanThread *t = context->thread;
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t) return false;
*stack_begin = t->stack_bottom();
*stack_end = t->stack_top();
@ -308,6 +320,13 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
return true;
}
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (t && t->has_fake_stack())
t->fake_stack()->ForEachFakeFrame(callback, arg);
}
void LockThreadRegistry() {
__asan::asanThreadRegistry().Lock();
}

View File

@ -17,6 +17,7 @@
#include "asan_fake_stack.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
@ -34,11 +35,13 @@ class AsanThreadContext : public ThreadContextBase {
explicit AsanThreadContext(int tid)
: ThreadContextBase(tid),
announced(false),
destructor_iterations(kPthreadDestructorIterations),
stack_id(0),
thread(0) {
internal_memset(&stack, 0, sizeof(stack));
}
bool announced;
StackTrace stack;
u8 destructor_iterations;
u32 stack_id;
AsanThread *thread;
void OnCreated(void *arg);
@ -46,7 +49,7 @@ class AsanThreadContext : public ThreadContextBase {
};
// AsanThreadContext objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadContext) <= 4096);
COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
@ -96,14 +99,15 @@ class AsanThread {
// True is this thread is currently unwinding stack (i.e. collecting a stack
// trace). Used to prevent deadlocks on platforms where libc unwinder calls
// malloc internally. See PR17116 for more details.
bool isUnwinding() const { return unwinding; }
void setUnwinding(bool b) { unwinding = b; }
bool isUnwinding() const { return unwinding_; }
void setUnwinding(bool b) { unwinding_ = b; }
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
private:
AsanThread() : unwinding(false) {}
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
void SetThreadStackAndTls();
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
@ -111,18 +115,18 @@ class AsanThread {
AsanThreadContext *context_;
thread_callback_t start_routine_;
void *arg_;
uptr stack_top_;
uptr stack_bottom_;
uptr stack_top_;
uptr stack_bottom_;
// stack_size_ == stack_top_ - stack_bottom_;
// It needs to be set in a async-signal-safe manner.
uptr stack_size_;
uptr stack_size_;
uptr tls_begin_;
uptr tls_end_;
FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding;
bool unwinding_;
};
// ScopedUnwinding is a scope for stacktracing member of a context

View File

@ -58,6 +58,9 @@ void AsanTSDSet(void *tsd) {
fake_tsd = tsd;
}
void PlatformTSDDtor(void *tsd) {
AsanThread::TSDDtor(tsd);
}
// ---------------------- Various stuff ---------------- {{{1
void MaybeReexec() {
// No need to re-exec on Windows.

View File

@ -25,10 +25,6 @@ extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to given file descriptor instead of
// stderr.
void __sanitizer_set_report_fd(int fd);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
@ -49,6 +45,44 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar.
// A contiguous container is a container that keeps all of its elements
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
// end <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
//
// This annotation tells the Sanitizer tool about the current state of the
// container so that the tool can report errors when memory from [mid, end)
// is accessed. Insert this annotation into methods like push_back/pop_back.
// Supply the old and the new values of mid (old_mid/new_mid).
// In the initial state mid == end and so should be the final
// state when the container is destroyed or when it reallocates the storage.
//
// Use with caution and don't use for anything other than vector-like classes.
//
// For AddressSanitizer, 'beg' should be 8-aligned and 'end' should
// be either 8-aligned or it should point to the end of a separate heap-,
// stack-, or global- allocated buffer. I.e. the following will not work:
// int64_t x[2]; // 16 bytes, 8-aligned.
// char *beg = (char *)&x[0];
// char *end = beg + 12; // Not 8 aligned, not the end of the buffer.
// This however will work fine:
// int32_t x[3]; // 12 bytes, but 8-aligned under AddressSanitizer.
// char *beg = (char*)&x[0];
// char *end = beg + 12; // Not 8-aligned, but is the end of the buffer.
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -985,10 +985,11 @@
#else
#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1) \
__sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
(long)(pos))
(long)(pos0), (long)(pos1))
#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \
__sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
(long)(count), (long)(pos))
(long)(count), (long)(pos0), \
(long)(pos1))
#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \
__sanitizer_syscall_pre_impl_pwrite64( \
(long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))

View File

@ -236,12 +236,18 @@ typedef unsigned long uptr; // NOLINT
#if defined(__linux__)
# include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX(func, symver)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_MAC(func, symver)
#else // defined(_WIN32)
# include "interception_win.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_WIN(func, symver)
#endif
#undef INCLUDED_FROM_INTERCEPTION_LIB

View File

@ -33,9 +33,12 @@ void *GetFuncAddrVer(const char *func_name, const char *ver);
(::__interception::uptr)&WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
#define INTERCEPT_FUNCTION_VER(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, #symver)
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, symver)
#else
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
INTERCEPT_FUNCTION_LINUX(func)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H

View File

@ -20,6 +20,7 @@
#define INTERCEPTION_MAC_H
#define INTERCEPT_FUNCTION_MAC(func)
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
#endif // INTERCEPTION_MAC_H
#endif // __APPLE__

View File

@ -39,5 +39,8 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func);
(::__interception::uptr*)&REAL(func))
#endif
#define INTERCEPT_FUNCTION_VER_WIN(func, symver) \
INTERCEPT_FUNCTION_WIN(func)
#endif // INTERCEPTION_WIN_H
#endif // _WIN32

View File

@ -22,6 +22,7 @@ lsan_files = \
lsan.cc \
lsan_allocator.cc \
lsan_interceptors.cc \
lsan_preinit.cc \
lsan_thread.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)

View File

@ -83,7 +83,7 @@ liblsan_la_DEPENDENCIES = \
$(am__DEPENDENCIES_1)
am__objects_1 = lsan_common.lo lsan_common_linux.lo
am__objects_2 = $(am__objects_1) lsan.lo lsan_allocator.lo \
lsan_interceptors.lo lsan_thread.lo
lsan_interceptors.lo lsan_preinit.lo lsan_thread.lo
am_liblsan_la_OBJECTS = $(am__objects_2)
liblsan_la_OBJECTS = $(am_liblsan_la_OBJECTS)
liblsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@ -264,6 +264,7 @@ lsan_files = \
lsan.cc \
lsan_allocator.cc \
lsan_interceptors.cc \
lsan_preinit.cc \
lsan_thread.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)
@ -400,6 +401,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_preinit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_thread.Plo@am__quote@
.cc.o:

View File

@ -18,26 +18,30 @@
#include "lsan_common.h"
#include "lsan_thread.h"
bool lsan_inited;
bool lsan_init_is_running;
namespace __lsan {
static void InitializeCommonFlags() {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf->symbolize = true;
cf->strip_path_prefix = "";
cf->fast_unwind_on_malloc = true;
cf->malloc_context_size = 30;
cf->detect_leaks = true;
cf->leak_check_at_exit = true;
ParseCommonFlagsFromString(GetEnv("LSAN_OPTIONS"));
ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
}
void Init() {
static bool inited;
if (inited)
} // namespace __lsan
using namespace __lsan; // NOLINT
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
return;
inited = true;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
InitializeCommonFlags();
InitializeAllocator();
@ -51,13 +55,14 @@ void Init() {
// Start symbolizer process if necessary.
if (common_flags()->symbolize) {
getSymbolizer()
->InitializeExternal(common_flags()->external_symbolizer_path);
Symbolizer::Init(common_flags()->external_symbolizer_path);
} else {
Symbolizer::Disable();
}
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
lsan_inited = true;
lsan_init_is_running = false;
}
} // namespace __lsan

View File

@ -15,7 +15,11 @@
namespace __lsan {
void Init();
void InitializeInterceptors();
} // namespace __lsan
extern bool lsan_inited;
extern bool lsan_init_is_running;
extern "C" void __lsan_init();

View File

@ -18,6 +18,8 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
static const uptr kMaxAllowedMallocSize = 8UL << 30;
@ -32,7 +34,7 @@ struct ChunkMetadata {
};
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@ -78,7 +80,10 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return 0;
}
void *p = allocator.Allocate(&cache, size, alignment, cleared);
void *p = allocator.Allocate(&cache, size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
return p;
}

View File

@ -91,8 +91,12 @@ void InitializeSuppressions() {
void InitCommonLsan() {
InitializeFlags();
InitializeSuppressions();
InitializePlatformSpecificModules();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
InitializeSuppressions();
InitializePlatformSpecificModules();
}
}
class Decorator: private __sanitizer::AnsiColorDecorator {
@ -136,6 +140,8 @@ void ScanRangeForPointers(uptr begin, uptr end,
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
uptr chunk = PointsIntoChunk(p);
if (!chunk) continue;
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
if (chunk == begin) continue;
LsanMetadata m(chunk);
// Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue;
@ -149,6 +155,11 @@ void ScanRangeForPointers(uptr begin, uptr end,
}
}
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
@ -197,6 +208,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
kReachable);
ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
}
if (flags()->use_tls) {
@ -261,6 +273,8 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
if (flags()->log_pointers)
Report("Processing platform-specific allocations.\n");
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
@ -281,8 +295,7 @@ static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id);
uptr size = 0;
const uptr *trace = StackDepotGet(stack_trace_id, &size);
StackTrace::PrintStack(trace, size, common_flags()->symbolize,
common_flags()->strip_path_prefix, 0);
StackTrace::PrintStack(trace, size);
}
// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
@ -400,8 +413,8 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
uptr addr_frames_num =
getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames);
uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
Suppression* s;
if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
@ -479,7 +492,6 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
leaks_[i].total_size, leaks_[i].hit_count);
Printf("%s", d.End());
PrintStackTraceById(leaks_[i].stack_trace_id);
Printf("\n");
leaks_printed++;
if (leaks_printed == num_leaks_to_print) break;
}
@ -497,12 +509,11 @@ void LeakReport::PrintSummary() {
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
const int kMaxSummaryLength = 128;
InternalScopedBuffer<char> summary(kMaxSummaryLength);
internal_snprintf(summary.data(), kMaxSummaryLength,
"LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).",
bytes, allocations);
__sanitizer_report_error_summary(summary.data());
internal_snprintf(summary.data(), summary.size(),
"%zu byte(s) leaked in %zu allocation(s).", bytes,
allocations);
ReportErrorSummary(summary.data());
}
uptr LeakReport::ApplySuppressions() {
@ -528,6 +539,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
if (!common_flags()->detect_leaks)
return;
// Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
// locked.
BlockingMutexLock l(&global_mutex);
@ -552,7 +565,7 @@ void __lsan_disable() {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
if (!__lsan::disable_counter) {
if (!__lsan::disable_counter && common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}

View File

@ -133,6 +133,8 @@ void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end);
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call

View File

@ -42,11 +42,17 @@ int pthread_setspecific(unsigned key, const void *v);
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
GetStackTrace(&stack, __sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
}
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
__lsan_init(); \
} while (0)
///// Malloc/free interceptors. /////
const bool kAlwaysClearMemory = true;
@ -56,38 +62,49 @@ namespace std {
}
INTERCEPTOR(void*, malloc, uptr size) {
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
INTERCEPTOR(void, free, void *p) {
Init();
ENSURE_LSAN_INITED;
Deallocate(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (lsan_init_is_running) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static uptr allocated;
uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
return mem;
}
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
size *= nmemb;
return Allocate(stack, size, 1, true);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
return Reallocate(stack, q, size, 1);
}
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
@ -95,7 +112,7 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
}
INTERCEPTOR(void*, valloc, uptr size) {
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
if (size == 0)
size = GetPageSizeCached();
@ -103,7 +120,7 @@ INTERCEPTOR(void*, valloc, uptr size) {
}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
Init();
ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr);
}
@ -122,7 +139,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
}
INTERCEPTOR(void*, pvalloc, uptr size) {
Init();
ENSURE_LSAN_INITED;
GET_STACK_TRACE;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
@ -136,7 +153,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
INTERCEPTOR(void, cfree, void *p) ALIAS("free");
#define OPERATOR_NEW_BODY \
Init(); \
ENSURE_LSAN_INITED; \
GET_STACK_TRACE; \
return Allocate(stack, size, 1, kAlwaysClearMemory);
@ -150,7 +167,7 @@ INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \
Init(); \
ENSURE_LSAN_INITED; \
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
@ -190,9 +207,6 @@ struct ThreadParam {
atomic_uintptr_t tid;
};
// PTHREAD_DESTRUCTOR_ITERATIONS from glibc.
const uptr kPthreadDestructorIterations = 4;
extern "C" void *__lsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback;
@ -215,14 +229,14 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) {
Init();
ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSizeLinux(attr, 0);
AdjustStackSizeLinux(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
@ -243,7 +257,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
}
INTERCEPTOR(int, pthread_join, void *th, void **ret) {
Init();
ENSURE_LSAN_INITED;
int tid = ThreadTid((uptr)th);
int res = REAL(pthread_join)(th, ret);
if (res == 0)

View File

@ -0,0 +1,24 @@
//===-- lsan_preinit.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
//
// Call __lsan_init at the very early stage of process startup.
//===----------------------------------------------------------------------===//
#include "lsan.h"
// Allow builds to opt out of .preinit_array usage at compile time.
#ifndef LSAN_USE_PREINIT_ARRAY
#define LSAN_USE_PREINIT_ARRAY 1
#endif
#if LSAN_USE_PREINIT_ARRAY && !defined(PIC)
// We force __lsan_init to be called before anyone else by placing it into
// .preinit_array section.
// NOTE(review): skipped for PIC builds — presumably because .preinit_array
// entries are only honored for the main executable, not shared objects;
// confirm against the linker/loader documentation.
__attribute__((section(".preinit_array"), used))
void (*__local_lsan_preinit)(void) = __lsan_init;
#endif

View File

@ -143,6 +143,10 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
return true;
}
// Hook for enumerating stack ranges beyond the registered thread stacks.
// This implementation has no such extra ranges, so the callback is never
// invoked; the empty body satisfies the interface declared in the header.
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
                            void *arg) {
}
void LockThreadRegistry() {
thread_registry->Lock();
}

View File

@ -14,22 +14,28 @@ sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_printf.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_thread_registry.cc \
sanitizer_win.cc

View File

@ -56,17 +56,19 @@ CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_flags.lo \
sanitizer_libc.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_common_libcdep.lo sanitizer_coverage.lo \
sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
@ -235,22 +237,28 @@ sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_printf.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_thread_registry.cc \
sanitizer_win.cc
@ -350,8 +358,10 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
@ -362,8 +372,12 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@

View File

@ -585,7 +585,69 @@ class FlatByteMap {
u8 map_[kSize];
};
// FIXME: Also implement TwoLevelByteMap.
// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
  // Zeroes the first-level pointer table and initializes the mutex.
  // Test-only: production users rely on linker zero-initialization.
  void TestOnlyInit() {
    internal_memset(map1_, 0, sizeof(map1_));
    mu_.Init();
  }
  // Unmaps every allocated second-level array, notifying MapUnmapCallback.
  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      u8 *p = Get(i);
      if (!p) continue;
      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
      UnmapOrDie(p, kSize2);
    }
  }
  // Total number of addressable byte slots.
  uptr size() const { return kSize1 * kSize2; }
  uptr size1() const { return kSize1; }
  uptr size2() const { return kSize2; }
  // Sets slot |idx| to |val|. Each slot may be set only once: the CHECK_EQ
  // enforces that the previous value was zero.
  void set(uptr idx, u8 val) {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = GetOrCreate(idx / kSize2);
    CHECK_EQ(0U, map2[idx % kSize2]);
    map2[idx % kSize2] = val;
  }
  // Reads slot |idx|; returns 0 when the second-level array was never
  // created (all values start at zero).
  u8 operator[] (uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    u8 *map2 = Get(idx / kSize2);
    if (!map2) return 0;
    return map2[idx % kSize2];
  }
 private:
  // Acquire-load of the second-level pointer pairs with the release-store
  // in GetOrCreate, so a reader that sees the pointer also sees the
  // fully-mapped (zeroed) array behind it.
  u8 *Get(uptr idx) const {
    CHECK_LT(idx, kSize1);
    return reinterpret_cast<u8 *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }
  // Double-checked creation of the second-level array: fast path is a
  // lock-free Get; the slow path re-checks under the spin lock before
  // mmapping and publishing with a release store.
  u8 *GetOrCreate(uptr idx) {
    u8 *res = Get(idx);
    if (!res) {
      SpinMutexLock l(&mu_);
      if (!(res = Get(idx))) {
        res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
        MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
        atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                     memory_order_release);
      }
    }
    return res;
  }
  atomic_uintptr_t map1_[kSize1];  // First-level table of array pointers.
  StaticSpinMutex mu_;             // Guards the slow path of GetOrCreate.
};
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
@ -1049,6 +1111,7 @@ class LargeMmapAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *ptr) {
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
if (!n) return 0;
@ -1181,14 +1244,15 @@ class CombinedAllocator {
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
if (primary_.CanAllocate(size, alignment))
bool from_primary = primary_.CanAllocate(size, alignment);
if (from_primary)
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, size, alignment);
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res)
internal_memset(res, 0, size);
if (cleared && res && from_primary)
internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res;
}

View File

@ -25,21 +25,25 @@ static const uptr kInternalAllocatorSpace = 0;
#if SANITIZER_WORDSIZE == 32
static const u64 kInternalAllocatorSize = (1ULL << 32);
static const uptr kInternalAllocatorRegionSizeLog = 20;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
static const u64 kInternalAllocatorSize = (1ULL << 47);
static const uptr kInternalAllocatorRegionSizeLog = 24;
#endif
static const uptr kInternalAllocatorFlatByteMapSize =
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
typedef SizeClassAllocator32<
kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap,
kInternalAllocatorRegionSizeLog,
FlatByteMap<kInternalAllocatorFlatByteMapSize> > PrimaryInternalAllocator;
kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
// We don't want our internal allocator to do any map/unmap operations.
// We don't want our internal allocator to do any map/unmap operations from
// LargeMmapAllocator.
struct CrashOnMapUnmap {
void OnMap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");

View File

@ -10,12 +10,14 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";
uptr SanitizerVerbosity = 0;
uptr GetPageSizeCached() {
static uptr PageSize;
@ -134,14 +136,71 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
return (void*)res;
}
// Returns the tail of |filepath| that follows the first occurrence of
// |strip_path_prefix|, additionally skipping a leading "./" left after the
// prefix. Returns |filepath| unchanged when the prefix is null or absent,
// and null for a null |filepath|. The result aliases |filepath|.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_path_prefix) {
  if (!filepath)
    return 0;
  if (!strip_path_prefix)
    return filepath;
  const char *match = internal_strstr(filepath, strip_path_prefix);
  if (!match)
    return filepath;
  const char *rest = match + internal_strlen(strip_path_prefix);
  // Drop a residual "./" so the stripped path looks clean.
  if (rest[0] == '.' && rest[1] == '/')
    rest += 2;
  return rest;
}
// Appends "file[:line[:column]]" to |buffer|, with the configured
// strip_path_prefix removed from |file|. Line/column are appended only
// when positive; |file| must be non-null.
void PrintSourceLocation(InternalScopedString *buffer, const char *file,
                         int line, int column) {
  CHECK(file);
  const char *stripped =
      StripPathPrefix(file, common_flags()->strip_path_prefix);
  buffer->append("%s", stripped);
  if (line <= 0)
    return;
  buffer->append(":%d", line);
  if (column > 0)
    buffer->append(":%d", column);
}
// Appends "(module+0xoffset)" to |buffer|, with the configured
// strip_path_prefix removed from the module path.
void PrintModuleAndOffset(InternalScopedString *buffer, const char *module,
                          uptr offset) {
  const char *short_module =
      StripPathPrefix(module, common_flags()->strip_path_prefix);
  buffer->append("(%s+0x%zx)", short_module, offset);
}
// Formats "SUMMARY: <tool>: <error_message>" into a bounded stack buffer
// (kMaxSummaryLength) and passes it to __sanitizer_report_error_summary.
// Does nothing when the print_summary flag is disabled.
void ReportErrorSummary(const char *error_message) {
  if (!common_flags()->print_summary)
    return;
  InternalScopedBuffer<char> buff(kMaxSummaryLength);
  internal_snprintf(buff.data(), buff.size(),
                    "SUMMARY: %s: %s", SanitizerToolName, error_message);
  __sanitizer_report_error_summary(buff.data());
}
void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function) {
const int kMaxSize = 1024; // We don't want a summary too long.
InternalScopedBuffer<char> buff(kMaxSize);
internal_snprintf(buff.data(), kMaxSize, "%s: %s %s:%d %s",
SanitizerToolName, error_type,
file ? file : "??", line, function ? function : "??");
__sanitizer_report_error_summary(buff.data());
if (!common_flags()->print_summary)
return;
InternalScopedBuffer<char> buff(kMaxSummaryLength);
internal_snprintf(
buff.data(), buff.size(), "%s %s:%d %s", error_type,
file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??",
line, function ? function : "??");
ReportErrorSummary(buff.data());
}
// Stack-trace flavor of ReportErrorSummary: symbolizes the top frame (when
// a symbolizer is available and the trace is non-empty) and forwards its
// file/line/function to the location-based overload. If symbolization is
// unavailable, |ai| stays default-constructed and "??" placeholders result.
void ReportErrorSummary(const char *error_type, StackTrace *stack) {
  if (!common_flags()->print_summary)
    return;
  AddressInfo ai;
#if !SANITIZER_GO
  if (stack->size > 0 && Symbolizer::Get()->IsAvailable()) {
    // Currently, we include the first stack frame into the report summary.
    // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
    uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
    Symbolizer::Get()->SymbolizeCode(pc, &ai, 1);
  }
#endif
  ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
}
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
@ -165,13 +224,25 @@ bool LoadedModule::containsAddress(uptr address) const {
return false;
}
// Returns a freshly internal_strdup'ed copy of |module| with all leading
// directory components removed; returns null for a null input. The caller
// owns the returned string.
char *StripModuleName(const char *module) {
  if (!module)
    return 0;
  const char *last_slash = internal_strrchr(module, '/');
  const char *basename = last_slash ? last_slash + 1 : module;
  return internal_strdup(basename);
}
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
extern "C" {
void __sanitizer_set_report_path(const char *path) {
if (!path) return;
if (!path)
return;
uptr len = internal_strlen(path);
if (len > sizeof(report_path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
@ -179,18 +250,21 @@ void __sanitizer_set_report_path(const char *path) {
path[4], path[5], path[6], path[7]);
Die();
}
internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
report_fd = kInvalidFd;
log_to_file = true;
}
void __sanitizer_set_report_fd(int fd) {
if (report_fd != kStdoutFd &&
report_fd != kStderrFd &&
report_fd != kInvalidFd)
internal_close(report_fd);
report_fd = fd;
report_fd = kInvalidFd;
log_to_file = false;
if (internal_strcmp(path, "stdout") == 0) {
report_fd = kStdoutFd;
} else if (internal_strcmp(path, "stderr") == 0) {
report_fd = kStderrFd;
} else {
internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
log_to_file = true;
}
}
void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
@ -199,6 +273,6 @@ void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
}
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("SUMMARY: %s\n", error_summary);
Printf("%s\n", error_summary);
}
} // extern "C"

View File

@ -34,7 +34,6 @@ const uptr kCacheLineSize = 64;
const uptr kMaxPathLength = 512;
extern const char *SanitizerToolName; // Can be changed by the tool.
extern uptr SanitizerVerbosity;
uptr GetPageSize();
uptr GetPageSizeCached();
@ -86,6 +85,23 @@ class InternalScopedBuffer {
void operator=(const InternalScopedBuffer&);
};
class InternalScopedString : public InternalScopedBuffer<char> {
public:
explicit InternalScopedString(uptr max_length)
: InternalScopedBuffer<char>(max_length), length_(0) {
(*this)[0] = '\0';
}
uptr length() { return length_; }
void clear() {
(*this)[0] = '\0';
length_ = 0;
}
void append(const char *format, ...);
private:
uptr length_;
};
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
@ -110,6 +126,7 @@ bool PrintsToTtyCached();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
void MaybeOpenReportFile();
@ -130,6 +147,14 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix);
void PrintSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column);
void PrintModuleAndOffset(InternalScopedString *buffer,
const char *module, uptr offset);
// OS
void DisableCoreDumper();
void DumpProcessMap();
@ -153,6 +178,9 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
// Strip the directories from the module name, return a new string allocated
// with internal_strdup.
char *StripModuleName(const char *module);
// Exit
void NORETURN Abort();
@ -176,11 +204,17 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Construct a one-line string like
// SanitizerToolName: error_type file:line function
// and call __sanitizer_report_error_summary on it.
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
// error_type: file:line function
void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function);
void ReportErrorSummary(const char *error_type, StackTrace *trace);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@ -326,6 +360,8 @@ class InternalMmapVector {
return capacity_;
}
void clear() { size_ = 0; }
private:
void Resize(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
@ -431,6 +467,20 @@ typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter);
#if SANITIZER_POSIX
const uptr kPthreadDestructorIterations = 4;
#else
// Unused on Windows.
const uptr kPthreadDestructorIterations = 0;
#endif
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
__sanitizer::LowLevelAllocator &alloc) {
return alloc.Allocate(size);
}
#endif // SANITIZER_COMMON_H

File diff suppressed because it is too large Load Diff

View File

@ -86,7 +86,7 @@ static void ioctl_table_fill() {
_(TIOCSTI, READ, sizeof(char));
_(TIOCSWINSZ, READ, struct_winsize_sz);
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC
#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
_(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
_(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
#endif

View File

@ -23,8 +23,16 @@
// COMMON_SYSCALL_POST_WRITE_RANGE
// Called in posthook for regions that were written to by the kernel
// and are now initialized.
// COMMON_SYSCALL_ACQUIRE(addr)
// Acquire memory visibility from addr.
// COMMON_SYSCALL_RELEASE(addr)
// Release memory visibility to addr.
// COMMON_SYSCALL_FD_CLOSE(fd)
// Called before closing file descriptor fd.
// COMMON_SYSCALL_FD_ACQUIRE(fd)
// Acquire memory visibility from fd.
// COMMON_SYSCALL_FD_RELEASE(fd)
// Release memory visibility to fd.
// COMMON_SYSCALL_PRE_FORK()
// Called before fork syscall.
// COMMON_SYSCALL_POST_FORK(long res)
@ -46,20 +54,34 @@
#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
#ifndef COMMON_SYSCALL_ACQUIRE
# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
#endif
#ifndef COMMON_SYSCALL_RELEASE
# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
#endif
#ifndef COMMON_SYSCALL_FD_CLOSE
# define COMMON_SYSCALL_FD_CLOSE(fd)
# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
#endif
#ifndef COMMON_SYSCALL_FD_ACQUIRE
# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
#endif
#ifndef COMMON_SYSCALL_FD_RELEASE
# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
#endif
#ifndef COMMON_SYSCALL_PRE_FORK
# define COMMON_SYSCALL_PRE_FORK()
# define COMMON_SYSCALL_PRE_FORK() {}
#endif
#ifndef COMMON_SYSCALL_POST_FORK
# define COMMON_SYSCALL_POST_FORK(res)
# define COMMON_SYSCALL_POST_FORK(res) {}
#endif
#ifdef SYSCALL_INTERCEPTION
// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
extern "C" {
@ -1245,11 +1267,17 @@ PRE_SYSCALL(flock)(long fd, long cmd) {}
POST_SYSCALL(flock)(long res, long fd, long cmd) {}
PRE_SYSCALL(io_setup)(long nr_reqs, void *ctx) {}
PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
}
POST_SYSCALL(io_setup)(long res, long nr_reqs, void *ctx) {
POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
if (res >= 0) {
if (ctx) POST_WRITE(ctx, sizeof(long));
if (ctx) POST_WRITE(ctx, sizeof(*ctx));
// (*ctx) is actually a pointer to a kernel mapped page, and there are
// people out there who are crazy enough to peek into that page's 32-byte
// header.
if (*ctx) POST_WRITE(*ctx, 32);
}
}
@ -1257,29 +1285,70 @@ PRE_SYSCALL(io_destroy)(long ctx) {}
POST_SYSCALL(io_destroy)(long res, long ctx) {}
PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr, void *events,
void *timeout) {
PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
__sanitizer_io_event *ioevpp, void *timeout) {
if (timeout) PRE_READ(timeout, struct_timespec_sz);
}
POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
void *events, void *timeout) {
__sanitizer_io_event *ioevpp, void *timeout) {
if (res >= 0) {
if (events) POST_WRITE(events, res * struct_io_event_sz);
if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
if (timeout) POST_WRITE(timeout, struct_timespec_sz);
}
for (long i = 0; i < res; i++) {
// We synchronize io_submit -> io_getevents/io_cancel using the
// user-provided data context. Data is not necessarily a pointer; it can be
// an int, 0 or whatever; acquire/release will correctly handle this.
// This scheme can lead to false negatives, e.g. when all operations
// synchronize on 0. But there does not seem to be a better solution
// (except wrapping all operations in own context, which is unreliable).
// We can not reliably extract fildes in io_getevents.
COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
}
}
PRE_SYSCALL(io_submit)(long, long arg1, void *arg2) {}
// Pre-hook for io_submit(2): checks the user buffers referenced by each
// submitted iocb. For plain pread/pwrite, aio_buf/aio_nbytes describe one
// buffer; for preadv/pwritev, aio_buf points to an iovec array and
// aio_nbytes holds its element count. Also releases memory visibility on
// each iocb's user data token (paired with the acquire in io_getevents).
PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
  for (long i = 0; i < nr; ++i) {
    uptr op = iocbpp[i]->aio_lio_opcode;
    void *data = (void*)iocbpp[i]->aio_data;
    void *buf = (void*)iocbpp[i]->aio_buf;
    uptr len = (uptr)iocbpp[i]->aio_nbytes;
    if (op == iocb_cmd_pwrite && buf && len) {
      PRE_READ(buf, len);
    } else if (op == iocb_cmd_pread && buf && len) {
      POST_WRITE(buf, len);
    } else if (op == iocb_cmd_pwritev) {
      __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
      // BUG FIX: index the iovec array with the inner loop variable |v|;
      // the original code reused the outer index |i|, checking the wrong
      // element (and potentially reading out of bounds) for every entry.
      for (uptr v = 0; v < len; v++)
        PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
    } else if (op == iocb_cmd_preadv) {
      __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
      for (uptr v = 0; v < len; v++)
        POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
    }
    // See comment in io_getevents.
    COMMON_SYSCALL_RELEASE(data);
  }
}
POST_SYSCALL(io_submit)(long res, long, long arg1, void *arg2) {}
POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
__sanitizer_iocb **iocbpp) {}
PRE_SYSCALL(io_cancel)(long ctx_id, void *iocb, void *result) {}
PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
__sanitizer_io_event *result) {
}
POST_SYSCALL(io_cancel)(long res, long ctx_id, void *iocb, void *result) {
if (res >= 0) {
if (iocb) POST_WRITE(iocb, struct_iocb_sz);
if (result) POST_WRITE(result, struct_io_event_sz);
POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
__sanitizer_io_event *result) {
if (res == 0) {
if (result) {
// See comment in io_getevents.
COMMON_SYSCALL_ACQUIRE((void*)result->data);
POST_WRITE(result, sizeof(*result));
}
if (iocb)
POST_WRITE(iocb, sizeof(*iocb));
}
}
@ -2063,14 +2132,6 @@ POST_SYSCALL(shmdt)(long res, void *shmaddr) {
}
}
PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
if (res >= 0) {
if (buf) POST_WRITE(buf, struct_shmid_ds_sz);
}
}
PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
long fifth) {}
@ -2078,6 +2139,14 @@ POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
void *ptr, long fifth) {}
#if !SANITIZER_ANDROID
PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
if (res >= 0) {
if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
}
}
PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
@ -2218,9 +2287,49 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if defined(__i386) || defined (__x86_64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
} else if (request == ptrace_setfpregs) {
PRE_READ((void *)data, struct_user_fpregs_struct_sz);
} else if (request == ptrace_setfpxregs) {
PRE_READ((void *)data, struct_user_fpxregs_struct_sz);
} else if (request == ptrace_setsiginfo) {
PRE_READ((void *)data, siginfo_t_sz);
} else if (request == ptrace_setregset) {
__sanitizer_iovec *iov = (__sanitizer_iovec *)data;
PRE_READ(iov->iov_base, iov->iov_len);
}
}
#endif
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if defined(__i386) || defined (__x86_64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
// PEEK* requests return resulting values through data pointer.
if (request == ptrace_getregs) {
POST_WRITE((void *)data, struct_user_regs_struct_sz);
} else if (request == ptrace_getfpregs) {
POST_WRITE((void *)data, struct_user_fpregs_struct_sz);
} else if (request == ptrace_getfpxregs) {
POST_WRITE((void *)data, struct_user_fpxregs_struct_sz);
} else if (request == ptrace_getsiginfo) {
POST_WRITE((void *)data, siginfo_t_sz);
} else if (request == ptrace_getregset) {
__sanitizer_iovec *iov = (__sanitizer_iovec *)data;
POST_WRITE(iov->iov_base, iov->iov_len);
} else if (request == ptrace_peekdata || request == ptrace_peektext ||
request == ptrace_peekuser) {
POST_WRITE((void *)data, sizeof(void *));
}
}
#endif
}
PRE_SYSCALL(add_key)(const void *_type, const void *_description,
const void *_payload, long plen, long destringid) {
@ -2648,16 +2757,14 @@ PRE_SYSCALL(syncfs)(long fd) {}
POST_SYSCALL(syncfs)(long res, long fd) {}
PRE_SYSCALL(perf_event_open)(void *attr_uptr, long pid, long cpu, long group_fd,
long flags) {}
POST_SYSCALL(perf_event_open)(long res, void *attr_uptr, long pid, long cpu,
long group_fd, long flags) {
if (res >= 0) {
if (attr_uptr) POST_WRITE(attr_uptr, struct_perf_event_attr_sz);
}
PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
long cpu, long group_fd, long flags) {
if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
}
POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
long pid, long cpu, long group_fd, long flags) {}
PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
long pgoff) {}
@ -2724,8 +2831,6 @@ POST_SYSCALL(vfork)(long res) {
}
} // extern "C"
#endif
#undef PRE_SYSCALL
#undef PRE_READ
#undef PRE_WRITE

View File

@ -0,0 +1,111 @@
//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every function F the compiler injects the following code:
// if (*Guard) {
// __sanitizer_cov(&F);
// *Guard = 1;
// }
// It's fine to call __sanitizer_cov more than once for a given function.
//
// Run-time:
// - __sanitizer_cov(pc): record that we've executed a given PC.
// - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple:
// it's just a sorted sequence of 4-byte offsets in the module.
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation.
// Consider this implementation as prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_flags.h"
// Accumulated coverage state: a mutex-protected, mmap-backed vector of
// executed PCs.
struct CovData {
  BlockingMutex mu;
  InternalMmapVector<uptr> v;
};
// No constructor ever runs for the coverage state: it lives in a static
// zero-initialized placeholder buffer, so it is usable before (and during)
// global constructors. This assumes all-zero bytes are a valid initial state
// for BlockingMutex and InternalMmapVector.
static uptr cov_data_placeholder[sizeof(CovData) / sizeof(uptr)];
COMPILER_CHECK(sizeof(cov_data_placeholder) >= sizeof(CovData));
static CovData *cov_data = reinterpret_cast<CovData*>(cov_data_placeholder);
namespace __sanitizer {
// Simply add the pc into the vector under lock. If the function is called more
// than once for a given PC it will be inserted multiple times, which is fine.
// This is the hot path (invoked from the compiler-injected guard on function
// entry), so it does nothing beyond the locked append.
static void CovAdd(uptr pc) {
  BlockingMutexLock lock(&cov_data->mu);
  cov_data->v.push_back(pc);
}
// Strict weak ordering for PCs; handed to InternalSort by CovDump so that
// each module's recorded PCs form one contiguous run.
static inline bool CompareLess(const uptr &lhs, const uptr &rhs) {
  return lhs < rhs;
}
// Dump the coverage on disk.
// Walks the process memory map and, for every executable module that contains
// recorded PCs, writes a "<module>.<pid>.sancov" file holding the sorted
// 32-bit offsets of those PCs within the module.
void CovDump() {
#if !SANITIZER_WINDOWS
  BlockingMutexLock lock(&cov_data->mu);
  InternalMmapVector<uptr> &v = cov_data->v;
  // Sorting groups all PCs belonging to one mapping together, so a single
  // linear sweep of [vb, ve) below suffices.
  InternalSort(&v, v.size(), CompareLess);
  InternalMmapVector<u32> offsets(v.size());
  const uptr *vb = v.data();
  const uptr *ve = vb + v.size();
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  uptr mb, me, off, prot;
  InternalScopedBuffer<char> module(4096);
  InternalScopedBuffer<char> path(4096 * 2);
  for (int i = 0;
       proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
       i++) {
    // Only executable mappings can contain recorded code addresses.
    if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
      continue;
    if (vb >= ve) break;  // Every recorded PC has been consumed.
    if (mb <= *vb && *vb < me) {
      offsets.clear();
      const uptr *old_vb = vb;
      CHECK_LE(off, *vb);
      // Convert each PC inside this mapping [mb, me) to a module offset.
      for (; vb < ve && *vb < me; vb++) {
        // NOTE(review): the first mapping (i == 0) is not rebased by mb,
        // presumably because the main executable is mapped at its link-time
        // address — confirm against the sancov file consumer.
        uptr diff = *vb - (i ? mb : 0) + off;
        CHECK_LE(diff, 0xffffffffU);  // Offsets must fit in a u32.
        offsets.push_back(static_cast<u32>(diff));
      }
      char *module_name = StripModuleName(module.data());
      internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
                        module_name, internal_getpid());
      InternalFree(module_name);
      uptr fd = OpenFile(path.data(), true);
      internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
      internal_close(fd);
      if (common_flags()->verbosity)
        Report(" CovDump: %s: %zd PCs written\n", path.data(), vb - old_vb);
    }
  }
#endif  // !SANITIZER_WINDOWS
}
} // namespace __sanitizer
extern "C" {
// Compiler-injected callback: records that the function containing |pc| has
// executed at least once.
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc) {
  CovAdd(reinterpret_cast<uptr>(pc));
}
// Public entry point: flush the accumulated coverage to *.sancov files.
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
}  // extern "C"

View File

@ -16,20 +16,40 @@
namespace __sanitizer {
CommonFlags common_flags_dont_use_directly;
// Reset every common (tool-independent) runtime flag to its default value.
void SetCommonFlagsDefaults(CommonFlags *f) {
  f->symbolize = true;
  // 0 means: look for an external symbolizer binary in PATH.
  f->external_symbolizer_path = 0;
  f->strip_path_prefix = "";
  f->fast_unwind_on_fatal = false;
  f->fast_unwind_on_malloc = true;
  f->handle_ioctl = false;
  f->malloc_context_size = 1;
  // "stderr" and "stdout" are special values; anything else is a file path
  // to which ".pid" is appended.
  f->log_path = "stderr";
  f->verbosity = 0;
  f->detect_leaks = false;
  f->leak_check_at_exit = true;
  f->allocator_may_return_null = false;
  f->print_summary = true;
}
void ParseCommonFlagsFromString(const char *str) {
CommonFlags *f = common_flags();
ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
ParseFlag(str, &f->log_path, "log_path");
ParseFlag(str, &f->verbosity, "verbosity");
ParseFlag(str, &f->detect_leaks, "detect_leaks");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
ParseFlag(str, &f->print_summary, "print_summary");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
f->malloc_context_size = 1;
}
static bool GetFlagValue(const char *env, const char *name,

View File

@ -23,7 +23,8 @@ void ParseFlag(const char *env, const char **flag, const char *name);
struct CommonFlags {
// If set, use the online symbolizer from common sanitizer runtime.
bool symbolize;
// Path to external symbolizer.
// Path to external symbolizer. If it is NULL, symbolizer will be looked for
// in PATH. If it is empty, external symbolizer will not be started.
const char *external_symbolizer_path;
// Strips this prefix from file paths in error reports.
const char *strip_path_prefix;
@ -35,8 +36,12 @@ struct CommonFlags {
bool handle_ioctl;
// Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// Write logs to "log_path.pid" instead of stderr.
// Write logs to "log_path.pid".
// The special values are "stdout" and "stderr".
// The default is "stderr".
const char *log_path;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// Enable memory leak detection.
bool detect_leaks;
// Invoke leak checking in an atexit handler. Has no effect if
@ -45,15 +50,17 @@ struct CommonFlags {
bool leak_check_at_exit;
// If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null;
// If false, disable printing error summaries in addition to error reports.
bool print_summary;
};
extern CommonFlags common_flags_dont_use_directly;
inline CommonFlags *common_flags() {
return &common_flags_dont_use_directly;
static CommonFlags f;
return &f;
}
void ParseCommonFlagsFromString(const char *str);
void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
} // namespace __sanitizer

View File

@ -32,6 +32,12 @@
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
#endif
// GCC does not understand __has_feature
#if !defined(__has_feature)
# define __has_feature(x) 0
@ -77,18 +83,20 @@ typedef u64 OFF_T;
typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_size_type;
#else
typedef u32 operator_new_size_type;
#endif
} // namespace __sanitizer
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to given file descriptor instead of
// stderr.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(int fd);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
@ -100,6 +108,14 @@ extern "C" {
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
} // extern "C"
@ -169,12 +185,6 @@ typedef void* thread_return_t;
#endif // _WIN32
typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
#endif
// NOTE: Functions below must be defined in each run-time.
namespace __sanitizer {
void NORETURN Die();

View File

@ -14,6 +14,16 @@
namespace __sanitizer {
// Make the compiler think that something is going on there.
// The empty asm with an opaque use of |arg| and a "memory" clobber prevents
// the optimizer from eliding or fusing surrounding memory operations (e.g. it
// keeps a manual zeroing loop from being recognized and turned into memset).
static inline void break_optimization(void *arg) {
#if SANITIZER_WINDOWS
  // FIXME: make sure this is actually enough.
  __asm;
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
// atoll(3) equivalent: parse |nptr| as a signed base-10 64-bit integer,
// discarding the end-of-number position.
s64 internal_atoll(const char *nptr) {
  char **no_endptr = (char**)0;
  return internal_simple_strtoll(nptr, no_endptr, 10);
}
@ -60,6 +70,16 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
// Semi-fast bzero for 16-aligned data. Still far from peak performance.
// Zeroes [s, s + n) 16 bytes at a time; both |s| and |n| must be
// 16-byte aligned (checked below).
void internal_bzero_aligned16(void *s, uptr n) {
  struct S16 { u64 a, b; } ALIGNED(16);
  // Folding the pointer and the length into one test catches misalignment
  // of either in a single comparison.
  CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
  for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
    p->a = p->b = 0;
    break_optimization(0);  // Make sure this does not become memset.
  }
}
void *internal_memset(void* s, int c, uptr n) {
// The next line prevents Clang from making a call to memset() instead of the
// loop below.

View File

@ -27,6 +27,8 @@ void *internal_memchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n);
// Set [s, s + n) to 0. Both s and n should be 16-aligned.
void internal_bzero_aligned16(void *s, uptr n);
// Should not be used in performance-critical places.
void *internal_memset(void *s, int c, uptr n);
char* internal_strchr(const char *s, int c);

View File

@ -0,0 +1,103 @@
//===-- sanitizer_libignore.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
namespace __sanitizer {
// Linker-initialized instance: every member relies on static
// zero-initialization, so the constructor body is intentionally empty.
LibIgnore::LibIgnore(LinkerInitialized) {
}
// Fetch every "called_from_lib" suppression from |supp| and remember its
// library-name template; the actual matching against loaded libraries happens
// later in OnLibraryLoaded(). Must be called at most once (count_ is required
// to still be 0). Dies if more than kMaxLibs suppressions are supplied.
void LibIgnore::Init(const SuppressionContext &supp) {
  BlockingMutexLock lock(&mutex_);
  CHECK_EQ(count_, 0);
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (s->type != SuppressionLib)
      continue;
    if (count_ >= kMaxLibs) {
      Report("%s: too many called_from_lib suppressions (max: %d)\n",
             SanitizerToolName, kMaxLibs);
      Die();
    }
    Lib *lib = &libs_[count_++];
    lib->templ = internal_strdup(s->templ);
    lib->name = 0;
    // Initialize every field explicitly: OnLibraryLoaded() tests
    // real_name == 0, so do not depend on static zero-initialization alone.
    lib->real_name = 0;
    lib->loaded = false;
  }
}
// (Re)scan the process memory map and match every executable mapping against
// the "called_from_lib" templates collected by Init().
// |name| is the library path as passed to the loader (may be 0, e.g. when
// called from OnLibraryUnloaded()); if it is a symlink, its target is also
// remembered so the proc-maps entry (which shows the resolved path) can still
// match the template. Dies on ambiguous matches or on unload of a matched
// library.
void LibIgnore::OnLibraryLoaded(const char *name) {
  BlockingMutexLock lock(&mutex_);
  // Try to match suppressions with symlink target.
  InternalScopedBuffer<char> buf(4096);
  if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
      buf.data()[0]) {
    for (uptr i = 0; i < count_; i++) {
      Lib *lib = &libs_[i];
      if (!lib->loaded && lib->real_name == 0 &&
          TemplateMatch(lib->templ, name))
        lib->real_name = internal_strdup(buf.data());
    }
  }
  // Scan suppressions list and find newly loaded and unloaded libraries.
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  InternalScopedBuffer<char> module(4096);
  for (uptr i = 0; i < count_; i++) {
    Lib *lib = &libs_[i];
    bool loaded = false;
    proc_maps.Reset();
    uptr b, e, off, prot;
    while (proc_maps.Next(&b, &e, &off, module.data(), module.size(), &prot)) {
      // Only executable mappings matter for PC-based ignore checks.
      if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
        continue;
      if (TemplateMatch(lib->templ, module.data()) ||
          (lib->real_name != 0 &&
           internal_strcmp(lib->real_name, module.data()) == 0)) {
        if (loaded) {
          // One template matching two distinct libraries is ambiguous: fatal.
          Report("%s: called_from_lib suppression '%s' is matched against"
                 " 2 libraries: '%s' and '%s'\n",
                 SanitizerToolName, lib->templ, lib->name, module.data());
          Die();
        }
        loaded = true;
        if (lib->loaded)
          continue;
        if (common_flags()->verbosity)
          Report("Matched called_from_lib suppression '%s' against library"
                 " '%s'\n", lib->templ, module.data());
        lib->loaded = true;
        lib->name = internal_strdup(module.data());
        // Publish the code range for the lock-free reader (IsIgnored): the
        // range is written first, then the count is release-stored so readers
        // that observe the new count also observe the range.
        const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
        code_ranges_[idx].begin = b;
        code_ranges_[idx].end = e;
        atomic_store(&loaded_count_, idx + 1, memory_order_release);
      }
    }
    if (lib->loaded && !loaded) {
      // A previously matched library disappeared from the map: unsupported.
      Report("%s: library '%s' that was matched against called_from_lib"
             " suppression '%s' is unloaded\n",
             SanitizerToolName, lib->name, lib->templ);
      Die();
    }
  }
}
// Re-scan the address space after an unload. Passing 0 skips the symlink
// resolution step in OnLibraryLoaded() and goes straight to the proc-maps
// scan, which detects (and reports as fatal) matched libraries that vanished.
void LibIgnore::OnLibraryUnloaded() {
  OnLibraryLoaded(0);
}
} // namespace __sanitizer
#endif // #if SANITIZER_LINUX

View File

@ -0,0 +1,82 @@
//===-- sanitizer_libignore.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LibIgnore allows to ignore all interceptors called from a particular set
// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions
// from the provided SuppressionContext; finds code ranges for the libraries;
// and checks whether the provided PC value belongs to the code ranges.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LIBIGNORE_H
#define SANITIZER_LIBIGNORE_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_suppressions.h"
#include "sanitizer_atomic.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
class LibIgnore {
 public:
  explicit LibIgnore(LinkerInitialized);

  // Fetches all "called_from_lib" suppressions from the SuppressionContext.
  // Must be called at most once, before any OnLibraryLoaded() calls.
  void Init(const SuppressionContext &supp);

  // Must be called after a new dynamic library is loaded.
  void OnLibraryLoaded(const char *name);

  // Must be called after a dynamic library is unloaded.
  void OnLibraryUnloaded();

  // Checks whether the provided PC belongs to one of the ignored libraries.
  // Lock-free; safe to call concurrently with OnLibraryLoaded().
  bool IsIgnored(uptr pc) const;

 private:
  // One "called_from_lib" suppression and its match state.
  struct Lib {
    char *templ;      // Suppression template the library name is matched to.
    char *name;       // Path of the matched library (set once loaded).
    char *real_name;  // target of symlink
    bool loaded;      // Whether a matching library is currently mapped.
  };

  // Executable address range [begin, end) of one ignored library.
  struct LibCodeRange {
    uptr begin;
    uptr end;
  };

  static const uptr kMaxLibs = 128;

  // Hot part: read lock-free by IsIgnored(). Entries
  // code_ranges_[0..loaded_count_) are fully written before loaded_count_ is
  // release-stored, so an acquire-load of the count publishes the ranges.
  atomic_uintptr_t loaded_count_;
  LibCodeRange code_ranges_[kMaxLibs];

  // Cold part: mutated only under mutex_ (Init / OnLibraryLoaded).
  BlockingMutex mutex_;
  uptr count_;
  Lib libs_[kMaxLibs];

  // Disallow copying of LibIgnore objects.
  LibIgnore(const LibIgnore&);  // not implemented
  void operator = (const LibIgnore&);  // not implemented
};
// Lock-free read path: acquire-load the published range count (pairing with
// the release store in OnLibraryLoaded) and linearly scan the code ranges.
// Returns true iff |pc| lies inside any ignored library's executable mapping.
inline bool LibIgnore::IsIgnored(uptr pc) const {
  const uptr n = atomic_load(&loaded_count_, memory_order_acquire);
  for (uptr i = 0; i < n; i++) {
    if (pc >= code_ranges_[i].begin && pc < code_ranges_[i].end)
      return true;
  }
  return false;
}
} // namespace __sanitizer
#endif // SANITIZER_LIBIGNORE_H

View File

@ -77,7 +77,8 @@ namespace __sanitizer {
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd, offset);
return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd,
offset);
#else
return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
#endif
@ -216,7 +217,8 @@ uptr GetTid() {
}
u64 NanoTime() {
kernel_timeval tv = {};
kernel_timeval tv;
internal_memset(&tv, 0, sizeof(tv));
internal_syscall(__NR_gettimeofday, (uptr)&tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
@ -309,7 +311,8 @@ void PrepareForSandboxing() {
MemoryMappingLayout::CacheMemoryMappings();
// Same for /proc/self/exe in the symbolizer.
#if !SANITIZER_GO
getSymbolizer()->PrepareForSandboxing();
if (Symbolizer *sym = Symbolizer::GetOrNull())
sym->PrepareForSandboxing();
#endif
}
@ -572,7 +575,8 @@ uptr internal_ptrace(int request, int pid, void *addr, void *data) {
}
uptr internal_waitpid(int pid, int *status, int options) {
return internal_syscall(__NR_wait4, pid, (uptr)status, options, 0 /* rusage */);
return internal_syscall(__NR_wait4, pid, (uptr)status, options,
0 /* rusage */);
}
uptr internal_getpid() {
@ -600,6 +604,31 @@ uptr internal_sigaltstack(const struct sigaltstack *ss,
return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss);
}
// Direct rt_sigaction syscall (no libc involvement). The kernel's rt_*
// signal syscalls take the sigset size as an explicit trailing argument.
uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
    __sanitizer_kernel_sigaction_t *oldact) {
  return internal_syscall(__NR_rt_sigaction, signum, act, oldact,
                          sizeof(__sanitizer_kernel_sigset_t));
}
// Direct rt_sigprocmask syscall.
// NOTE(review): both |set| and |oldset| are dereferenced unconditionally
// (&...->sig[0]), so callers must pass non-null pointers — confirm callers.
uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
    __sanitizer_kernel_sigset_t *oldset) {
  return internal_syscall(__NR_rt_sigprocmask, (uptr)how, &set->sig[0],
                          &oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t));
}
// sigfillset(3) equivalent: set every bit of the kernel sigset to 1.
void internal_sigfillset(__sanitizer_kernel_sigset_t *set) {
  internal_memset(set, 0xff, sizeof(*set));
}
// sigdelset(3) equivalent: clear the bit for |signum| (1-based) in the
// kernel sigset.
void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum) {
  signum -= 1;  // Signals are numbered from 1, bits from 0.
  CHECK_GE(signum, 0);
  CHECK_LT(signum, sizeof(*set) * 8);
  const uptr idx = signum / (sizeof(set->sig[0]) * 8);
  const uptr bit = signum % (sizeof(set->sig[0]) * 8);
  // Shift a value as wide as a sigset word: the previous plain-int literal
  // (1 << bit) was undefined behavior for bit >= 31 and could not address
  // the upper half of a 64-bit word at all.
  set->sig[idx] &= ~((uptr)1 << bit);
}
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
: pid_(pid),
@ -775,8 +804,8 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
((unsigned long long *)child_stack)[0] = (uptr)fn;
((unsigned long long *)child_stack)[1] = (uptr)arg;
register void *r8 __asm__ ("r8") = newtls;
register int *r10 __asm__ ("r10") = child_tidptr;
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
/* %rax = syscall(%rax = __NR_clone,
* %rdi = flags,

View File

@ -15,6 +15,7 @@
#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
struct link_map; // Opaque type returned by dlopen().
struct sigaltstack;
@ -29,6 +30,13 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
__sanitizer_kernel_sigaction_t *oldact);
uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
__sanitizer_kernel_sigset_t *oldset);
void internal_sigfillset(__sanitizer_kernel_sigset_t *set);
void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum);
#ifdef __x86_64__
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
@ -56,7 +64,7 @@ class ThreadLister {
int bytes_read_;
};
void AdjustStackSizeLinux(void *attr, int verbosity);
void AdjustStackSizeLinux(void *attr);
// Exposed for testing.
uptr ThreadDescriptorSize();
@ -74,7 +82,6 @@ void CacheBinaryName();
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer
#endif // SANITIZER_LINUX

View File

@ -14,6 +14,7 @@
#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
@ -30,6 +31,12 @@
#include <link.h>
#endif
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
// The weak default below simply forwards to libc's pthread_attr_getstack();
// a strong definition provided by a tool's interceptor overrides it.
SANITIZER_WEAK_ATTRIBUTE
int __sanitizer_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
  return pthread_attr_getstack((pthread_attr_t*)attr, addr, size);
}
namespace __sanitizer {
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
@ -71,7 +78,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
void *stackaddr = 0;
pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
__sanitizer_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
@ -137,35 +144,33 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#endif
}
// Parameter bundle threaded through _Unwind_Backtrace's callback: the stack
// trace being filled plus the maximum number of frames to record.
struct UnwindTraceArg {
  StackTrace *stack;
  uptr max_depth;
};
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
StackTrace *b = (StackTrace*)param;
CHECK(b->size < b->max_size);
UnwindTraceArg *arg = (UnwindTraceArg*)param;
CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = Unwind_GetIP(ctx);
b->trace[b->size++] = pc;
if (b->size == b->max_size) return UNWIND_STOP;
arg->stack->trace[arg->stack->size++] = pc;
if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
return UNWIND_CONTINUE;
}
// Heuristic: treat |cur_pc| and |trace_pc| as the same call site when they lie
// within 64 bytes of each other. With unsigned arithmetic, exactly one of the
// two wrapped differences is small whenever |cur_pc - trace_pc| <= 64, so the
// disjunction covers both directions.
static bool MatchPc(uptr cur_pc, uptr trace_pc) {
  return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
this->size = 0;
this->max_size = max_depth;
if (max_depth > 1) {
_Unwind_Backtrace(Unwind_Trace, this);
// We need to pop a few frames so that pc is on top.
// trace[0] belongs to the current function so we always pop it.
int to_pop = 1;
/**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
this->PopStackFrames(to_pop);
}
this->trace[0] = pc;
size = 0;
if (max_depth == 0)
return;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
_Unwind_Backtrace(Unwind_Trace, &arg);
// We need to pop a few frames so that pc is on top.
uptr to_pop = LocatePcInTrace(pc);
// trace[0] belongs to the current function so we always pop it.
if (to_pop == 0)
to_pop = 1;
PopStackFrames(to_pop);
trace[0] = pc;
}
#endif // !SANITIZER_GO
@ -265,11 +270,11 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif // SANITIZER_GO
}
void AdjustStackSizeLinux(void *attr_, int verbosity) {
void AdjustStackSizeLinux(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
size_t stacksize = 0;
pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
__sanitizer_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
@ -277,7 +282,7 @@ void AdjustStackSizeLinux(void *attr_, int verbosity) {
const uptr minstacksize = GetTlsSize() + 128*1024;
if (stacksize < minstacksize) {
if (!stack_set) {
if (verbosity && stacksize != 0)
if (common_flags()->verbosity && stacksize != 0)
Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
minstacksize);
pthread_attr_setstacksize(attr, minstacksize);

View File

@ -143,7 +143,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
const char *GetEnv(const char *name) {
char ***env_ptr = _NSGetEnviron();
CHECK(env_ptr);
if (!env_ptr) {
Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
"called after libSystem_initializer().\n");
CHECK(env_ptr);
}
char **environ = *env_ptr;
CHECK(environ);
uptr name_len = internal_strlen(name);

View File

@ -38,6 +38,10 @@ class StaticSpinMutex {
atomic_store(&state_, 0, memory_order_release);
}
// Debug assertion: verifies the spinlock is currently held (state_ == 1).
// Uses a relaxed load — this only inspects state, it does not synchronize.
void CheckLocked() {
  CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
private:
atomic_uint8_t state_;

View File

@ -16,15 +16,7 @@
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_ptr_type;
#else
typedef u32 operator_new_ptr_type;
#endif
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_ptr_type sz, void *p) {
inline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {
return p;
}

View File

@ -23,8 +23,15 @@
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
# if TARGET_OS_IPHONE
# define SANITIZER_IOS 1
# else
# define SANITIZER_IOS 0
# endif
#else
# define SANITIZER_MAC 0
# define SANITIZER_IOS 0
#endif
#if defined(_WIN32)

View File

@ -39,6 +39,12 @@
# define SI_MAC 0
#endif
#if SANITIZER_IOS
# define SI_IOS 1
#else
# define SI_IOS 0
#endif
# define SANITIZER_INTERCEPT_STRCMP 1
# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
@ -61,6 +67,7 @@
# define SANITIZER_INTERCEPT_PRCTL SI_LINUX
# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
@ -111,6 +118,7 @@
# define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
@ -124,5 +132,41 @@
# define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
# define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SHMCTL \
(SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
# define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SINCOS SI_LINUX
# define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
# define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
// FIXME: getline seems to be available on OSX 10.7
# define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT__EXIT SI_LINUX
# define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_COND SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H

View File

@ -14,34 +14,88 @@
// userspace headers.
// Most "normal" includes go in sanitizer_platform_limits_posix.cc
#ifdef SYSCALL_INTERCEPTION
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
// For offsetof -> __builtin_offsetof definition.
#include <stddef.h>
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <asm/posix_types.h>
#define ino_t __kernel_ino_t
#define mode_t __kernel_mode_t
#define nlink_t __kernel_nlink_t
#define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t
#define off_t __kernel_off_t
// This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h>
#undef ino_t
#undef mode_t
#undef nlink_t
#undef uid_t
#undef gid_t
#undef off_t
#include <linux/aio_abi.h>
#if SANITIZER_ANDROID
#include <asm/statfs.h>
#else
#include <sys/statfs.h>
#endif
#if !SANITIZER_ANDROID
#include <linux/perf_event.h>
#endif
namespace __sanitizer {
unsigned struct___old_kernel_stat_sz = sizeof(struct __old_kernel_stat);
unsigned struct_kernel_stat_sz = sizeof(struct stat);
unsigned struct_io_event_sz = sizeof(struct io_event);
unsigned struct_iocb_sz = sizeof(struct iocb);
#if !defined(_LP64) && !defined(__x86_64__)
unsigned struct_kernel_stat64_sz = sizeof(struct stat64);
#else
unsigned struct_kernel_stat64_sz = 0;
#endif
#if !SANITIZER_ANDROID
unsigned struct_perf_event_attr_sz = sizeof(struct perf_event_attr);
#endif
unsigned struct_statfs64_sz = sizeof(struct statfs64);
} // namespace __sanitizer
#endif // SANITIZER_LINUX
#if !defined(__powerpc64__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
COMPILER_CHECK(struct_kernel_stat_sz == sizeof(struct stat));
#if defined(__i386__)
COMPILER_CHECK(struct_kernel_stat64_sz == sizeof(struct stat64));
#endif
CHECK_TYPE_SIZE(io_event);
CHECK_SIZE_AND_OFFSET(io_event, data);
CHECK_SIZE_AND_OFFSET(io_event, obj);
CHECK_SIZE_AND_OFFSET(io_event, res);
CHECK_SIZE_AND_OFFSET(io_event, res2);
#if !SANITIZER_ANDROID
COMPILER_CHECK(sizeof(struct __sanitizer_perf_event_attr) <=
sizeof(struct perf_event_attr));
CHECK_SIZE_AND_OFFSET(perf_event_attr, type);
CHECK_SIZE_AND_OFFSET(perf_event_attr, size);
#endif
COMPILER_CHECK(iocb_cmd_pread == IOCB_CMD_PREAD);
COMPILER_CHECK(iocb_cmd_pwrite == IOCB_CMD_PWRITE);
#if !SANITIZER_ANDROID
COMPILER_CHECK(iocb_cmd_preadv == IOCB_CMD_PREADV);
COMPILER_CHECK(iocb_cmd_pwritev == IOCB_CMD_PWRITEV);
#endif
CHECK_TYPE_SIZE(iocb);
CHECK_SIZE_AND_OFFSET(iocb, aio_data);
// Skip aio_key, it's weird.
CHECK_SIZE_AND_OFFSET(iocb, aio_lio_opcode);
CHECK_SIZE_AND_OFFSET(iocb, aio_reqprio);
CHECK_SIZE_AND_OFFSET(iocb, aio_fildes);
CHECK_SIZE_AND_OFFSET(iocb, aio_buf);
CHECK_SIZE_AND_OFFSET(iocb, aio_nbytes);
CHECK_SIZE_AND_OFFSET(iocb, aio_offset);
#endif // SANITIZER_LINUX

View File

@ -19,6 +19,7 @@
#include <arpa/inet.h>
#include <dirent.h>
#include <errno.h>
#include <grp.h>
#include <limits.h>
#include <net/if.h>
@ -42,6 +43,8 @@
#include <wchar.h>
#if SANITIZER_LINUX
#include <mntent.h>
#include <netinet/ether.h>
#include <utime.h>
#include <sys/mount.h>
#include <sys/ptrace.h>
@ -75,6 +78,7 @@
#include <sys/mtio.h>
#include <sys/kd.h>
#include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h>
#include <sys/user.h>
#include <sys/ustat.h>
@ -87,6 +91,8 @@
#include <linux/scc.h>
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_ANDROID
@ -100,20 +106,22 @@
#include <link.h>
#include <sys/vfs.h>
#include <sys/epoll.h>
// #include <asm/stat.h>
#include <linux/capability.h>
#endif // SANITIZER_LINUX
#if SANITIZER_MAC
#include <netinet/ip_mroute.h>
#include <net/ethernet.h>
#include <sys/filio.h>
#include <sys/mount.h>
#include <sys/sockio.h>
#endif
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
#if !SANITIZER_IOS
unsigned struct_stat64_sz = sizeof(struct stat64);
#endif // !SANITIZER_IOS
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@ -122,6 +130,7 @@ namespace __sanitizer {
unsigned struct_sigaction_sz = sizeof(struct sigaction);
unsigned struct_itimerval_sz = sizeof(struct itimerval);
unsigned pthread_t_sz = sizeof(pthread_t);
unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
unsigned pid_t_sz = sizeof(pid_t);
unsigned timeval_sz = sizeof(timeval);
unsigned uid_t_sz = sizeof(uid_t);
@ -131,6 +140,11 @@ namespace __sanitizer {
unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
#if SANITIZER_MAC && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // SANITIZER_MAC && !SANITIZER_IOS
#if !SANITIZER_ANDROID
unsigned ucontext_t_sz = sizeof(ucontext_t);
@ -138,7 +152,6 @@ namespace __sanitizer {
#if SANITIZER_LINUX
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned struct_timespec_sz = sizeof(struct timespec);
@ -155,11 +168,11 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
unsigned struct_statfs64_sz = sizeof(struct statfs64);
unsigned struct_timex_sz = sizeof(struct timex);
unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
unsigned struct_shmid_ds_sz = sizeof(struct shmid_ds);
unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
unsigned struct_statvfs_sz = sizeof(struct statvfs);
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
uptr sig_ign = (uptr)SIG_IGN;
@ -170,6 +183,16 @@ namespace __sanitizer {
int e_tabsz = (int)E_TABSZ;
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_shminfo_sz = sizeof(struct shminfo);
unsigned struct_shm_info_sz = sizeof(struct shm_info);
int shmctl_ipc_stat = (int)IPC_STAT;
int shmctl_ipc_info = (int)IPC_INFO;
int shmctl_shm_info = (int)SHM_INFO;
int shmctl_shm_stat = (int)SHM_INFO;
#endif
int af_inet = (int)AF_INET;
int af_inet6 = (int)AF_INET6;
@ -197,6 +220,9 @@ namespace __sanitizer {
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif
int ptrace_peektext = PTRACE_PEEKTEXT;
int ptrace_peekdata = PTRACE_PEEKDATA;
int ptrace_peekuser = PTRACE_PEEKUSER;
int ptrace_getregs = PTRACE_GETREGS;
int ptrace_setregs = PTRACE_SETREGS;
int ptrace_getfpregs = PTRACE_GETFPREGS;
@ -295,7 +321,7 @@ namespace __sanitizer {
unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
#endif
#if !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
@ -346,7 +372,7 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
unsigned IOCTL_TIOCSTI = TIOCSTI;
unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC
#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
#endif
@ -733,25 +759,10 @@ namespace __sanitizer {
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
#endif
extern const int errno_EOWNERDEAD = EOWNERDEAD;
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((struct CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
@ -855,7 +866,6 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif
#ifdef SYSCALL_INTERCEPTION
#if SANITIZER_LINUX
CHECK_TYPE_SIZE(__sysctl_args);
CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
@ -873,7 +883,6 @@ CHECK_TYPE_SIZE(__kernel_off_t);
CHECK_TYPE_SIZE(__kernel_loff_t);
CHECK_TYPE_SIZE(__kernel_fd_set);
#endif
#endif
#if !SANITIZER_ANDROID
CHECK_TYPE_SIZE(wordexp_t);
@ -882,4 +891,52 @@ CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
#endif
CHECK_TYPE_SIZE(tm);
CHECK_SIZE_AND_OFFSET(tm, tm_sec);
CHECK_SIZE_AND_OFFSET(tm, tm_min);
CHECK_SIZE_AND_OFFSET(tm, tm_hour);
CHECK_SIZE_AND_OFFSET(tm, tm_mday);
CHECK_SIZE_AND_OFFSET(tm, tm_mon);
CHECK_SIZE_AND_OFFSET(tm, tm_year);
CHECK_SIZE_AND_OFFSET(tm, tm_wday);
CHECK_SIZE_AND_OFFSET(tm, tm_yday);
CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
CHECK_SIZE_AND_OFFSET(tm, tm_zone);
#if SANITIZER_LINUX
CHECK_TYPE_SIZE(mntent);
CHECK_SIZE_AND_OFFSET(mntent, mnt_fsname);
CHECK_SIZE_AND_OFFSET(mntent, mnt_dir);
CHECK_SIZE_AND_OFFSET(mntent, mnt_type);
CHECK_SIZE_AND_OFFSET(mntent, mnt_opts);
CHECK_SIZE_AND_OFFSET(mntent, mnt_freq);
CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
#endif
CHECK_TYPE_SIZE(ether_addr);
#if SANITIZER_LINUX && !SANITIZER_ANDROID
CHECK_TYPE_SIZE(ipc_perm);
CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
#endif
CHECK_TYPE_SIZE(clock_t);
#endif // SANITIZER_LINUX || SANITIZER_MAC

View File

@ -13,19 +13,22 @@
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
#if !SANITIZER_IOS
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
extern unsigned struct_tm_sz;
extern unsigned struct_passwd_sz;
extern unsigned struct_group_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
extern unsigned pthread_t_sz;
extern unsigned pthread_cond_t_sz;
extern unsigned pid_t_sz;
extern unsigned timeval_sz;
extern unsigned uid_t_sz;
@ -35,30 +38,52 @@ namespace __sanitizer {
extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_statfs64_sz;
#if !SANITIZER_ANDROID
extern unsigned ucontext_t_sz;
#endif // !SANITIZER_ANDROID
#if SANITIZER_LINUX
extern unsigned struct___old_kernel_stat_sz;
extern unsigned struct_kernel_stat_sz;
extern unsigned struct_kernel_stat64_sz;
extern unsigned struct_io_event_sz;
extern unsigned struct_iocb_sz;
#if defined(__x86_64__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 96;
#elif defined(__arm__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc__) && !defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 32;
const unsigned struct_kernel_stat_sz = 72;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
unsigned size;
// More fields that vary with the kernel version.
};
extern unsigned struct_utimbuf_sz;
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
extern unsigned struct_msqid_ds_sz;
extern unsigned struct_shmid_ds_sz;
extern unsigned struct_mq_attr_sz;
extern unsigned struct_perf_event_attr_sz;
extern unsigned struct_timex_sz;
extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz;
extern unsigned struct_timespec_sz;
@ -67,6 +92,32 @@ namespace __sanitizer {
const unsigned old_sigset_t_sz = sizeof(unsigned long);
const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
struct __sanitizer_iocb {
u64 aio_data;
u32 aio_key_or_aio_reserved1; // Simply crazy.
u32 aio_reserved1_or_aio_key; // Luckily, we don't need these.
u16 aio_lio_opcode;
s16 aio_reqprio;
u32 aio_fildes;
u64 aio_buf;
u64 aio_nbytes;
s64 aio_offset;
u64 aio_reserved2;
u64 aio_reserved3;
};
struct __sanitizer_io_event {
u64 data;
u64 obj;
u64 res;
u64 res2;
};
const unsigned iocb_cmd_pread = 0;
const unsigned iocb_cmd_pwrite = 1;
const unsigned iocb_cmd_preadv = 7;
const unsigned iocb_cmd_pwritev = 8;
struct __sanitizer___sysctl_args {
int *name;
int nlen;
@ -80,8 +131,55 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statfs64_sz;
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_statvfs_sz;
extern unsigned struct_statvfs64_sz;
struct __sanitizer_ipc_perm {
int __key;
int uid;
int gid;
int cuid;
int cgid;
#ifdef __powerpc64__
unsigned mode;
unsigned __seq;
#else
unsigned short mode;
unsigned short __pad1;
unsigned short __seq;
unsigned short __pad2;
#endif
uptr __unused1;
uptr __unused2;
};
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
#ifndef __powerpc__
uptr shm_segsz;
#endif
uptr shm_atime;
#ifndef _LP64
uptr __unused1;
#endif
uptr shm_dtime;
#ifndef _LP64
uptr __unused2;
#endif
uptr shm_ctime;
#ifndef _LP64
uptr __unused3;
#endif
#ifdef __powerpc__
uptr shm_segsz;
#endif
int shm_cpid;
int shm_lpid;
uptr shm_nattch;
uptr __unused4;
uptr __unused5;
};
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec {
void *iov_base;
@ -94,6 +192,35 @@ namespace __sanitizer {
typedef unsigned __sanitizer_pthread_key_t;
#endif
struct __sanitizer_ether_addr {
u8 octet[6];
};
struct __sanitizer_tm {
int tm_sec;
int tm_min;
int tm_hour;
int tm_mday;
int tm_mon;
int tm_year;
int tm_wday;
int tm_yday;
int tm_isdst;
long int tm_gmtoff;
const char *tm_zone;
};
#if SANITIZER_LINUX
struct __sanitizer_mntent {
char *mnt_fsname;
char *mnt_dir;
char *mnt_type;
char *mnt_opts;
int mnt_freq;
int mnt_passno;
};
#endif
#if SANITIZER_ANDROID || SANITIZER_MAC
struct __sanitizer_msghdr {
void *msg_name;
@ -158,6 +285,8 @@ namespace __sanitizer {
};
#endif
typedef long __sanitizer_clock_t;
#if SANITIZER_LINUX
#if defined(_LP64) || defined(__x86_64__)
typedef unsigned __sanitizer___kernel_uid_t;
@ -168,8 +297,15 @@ namespace __sanitizer {
typedef unsigned short __sanitizer___kernel_gid_t;
typedef long __sanitizer___kernel_off_t;
#endif
#if defined(__powerpc64__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
typedef unsigned short __sanitizer___kernel_old_uid_t;
typedef unsigned short __sanitizer___kernel_old_gid_t;
#endif
typedef long long __sanitizer___kernel_loff_t;
typedef struct {
unsigned long fds_bits[1024 / (8 * sizeof(long))];
@ -207,6 +343,20 @@ namespace __sanitizer {
#endif
};
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
};
struct __sanitizer_kernel_sigaction_t {
union {
void (*sigaction)(int signo, void *info, void *ctx);
void (*handler)(int signo);
};
unsigned long sa_flags;
void (*sa_restorer)(void);
__sanitizer_kernel_sigset_t sa_mask;
};
extern uptr sig_ign;
extern uptr sig_dfl;
extern uptr sa_siginfo;
@ -297,6 +447,9 @@ namespace __sanitizer {
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
extern int ptrace_peektext;
extern int ptrace_peekdata;
extern int ptrace_peekuser;
extern int ptrace_getregs;
extern int ptrace_setregs;
extern int ptrace_getfpregs;
@ -309,6 +462,15 @@ namespace __sanitizer {
extern int ptrace_setregset;
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_shminfo_sz;
extern unsigned struct_shm_info_sz;
extern int shmctl_ipc_stat;
extern int shmctl_ipc_info;
extern int shmctl_shm_info;
extern int shmctl_shm_stat;
#endif
// ioctl arguments
struct __sanitizer_ifconf {
int ifc_len;
@ -390,7 +552,7 @@ namespace __sanitizer {
extern unsigned struct_unimapinit_sz;
#endif
#if !SANITIZER_ANDROID
#if !SANITIZER_ANDROID && !SANITIZER_MAC
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
#endif
@ -445,7 +607,7 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSPGRP;
extern unsigned IOCTL_TIOCSTI;
extern unsigned IOCTL_TIOCSWINSZ;
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC
#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
extern unsigned IOCTL_SIOCGETSGCNT;
extern unsigned IOCTL_SIOCGETVIFCNT;
#endif
@ -807,6 +969,25 @@ namespace __sanitizer {
extern unsigned IOCTL_TIOCSERSETMULTI;
extern unsigned IOCTL_TIOCSSERIAL;
#endif
extern const int errno_EOWNERDEAD;
} // namespace __sanitizer
#define CHECK_TYPE_SIZE(TYPE) \
COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
sizeof(((struct CLASS *) NULL)->MEMBER)); \
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
#endif

View File

@ -87,28 +87,6 @@ int internal_isatty(fd_t fd) {
return isatty(fd);
}
#ifndef SANITIZER_GO
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom, bool fast) {
#if !SANITIZER_CAN_FAST_UNWIND
fast = false;
#endif
#if SANITIZER_MAC
// Always unwind fast on Mac.
(void)fast;
#else
if (!fast)
return stack->SlowUnwindStack(pc, max_s);
#endif // SANITIZER_MAC
stack->size = 0;
stack->trace[0] = pc;
if (max_s > 1) {
stack->max_size = max_s;
stack->FastUnwindStack(pc, bp, stack_top, stack_bottom);
}
}
#endif // SANITIZER_GO
} // namespace __sanitizer
#endif

View File

@ -193,17 +193,22 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)) {
PrintfAndReportCallback = callback;
}
#if SANITIZER_SUPPORTS_WEAK_HOOKS
// Can be overriden in frontend.
#if SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void OnPrint(const char *str) {
(void)str;
}
#elif defined(SANITIZER_GO) && defined(TSAN_EXTERNAL_HOOKS)
void OnPrint(const char *str);
#else
void OnPrint(const char *str) {
(void)str;
}
#endif
static void CallPrintfAndReportCallback(const char *str) {
#if SANITIZER_SUPPORTS_WEAK_HOOKS
if (&OnPrint != NULL)
OnPrint(str);
#endif
OnPrint(str);
if (PrintfAndReportCallback)
PrintfAndReportCallback(str);
}
@ -287,4 +292,13 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
void InternalScopedString::append(const char *format, ...) {
CHECK_LT(length_, size());
va_list args;
va_start(args, format);
VSNPrintf(data() + length_, size() - length_, format, args);
va_end(args);
length_ += internal_strlen(data() + length_);
}
} // namespace __sanitizer

View File

@ -24,13 +24,15 @@ namespace __sanitizer {
template<typename Node> class QuarantineCache;
struct QuarantineBatch {
static const uptr kSize = 1024;
static const uptr kSize = 1021;
QuarantineBatch *next;
uptr size;
uptr count;
void *batch[kSize];
};
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
@ -121,8 +123,10 @@ class QuarantineCache {
}
void Enqueue(Callback cb, void *ptr, uptr size) {
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
AllocBatch(cb);
size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
}
QuarantineBatch *b = list_.back();
b->batch[b->count++] = ptr;
b->size += size;
@ -145,9 +149,7 @@ class QuarantineCache {
return 0;
QuarantineBatch *b = list_.front();
list_.pop_front();
// FIXME: should probably add SizeSub method?
// See https://code.google.com/p/thread-sanitizer/issues/detail?id=20
SizeAdd(0 - b->size);
SizeSub(b->size);
return b;
}
@ -158,6 +160,9 @@ class QuarantineCache {
void SizeAdd(uptr add) {
atomic_store(&size_, Size() + add, memory_order_relaxed);
}
void SizeSub(uptr sub) {
atomic_store(&size_, Size() - sub, memory_order_relaxed);
}
NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));

View File

@ -10,21 +10,13 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix) {
if (filepath == 0) return 0;
const char *prefix_beg = internal_strstr(filepath, strip_file_prefix);
if (prefix_beg)
return prefix_beg + internal_strlen(strip_file_prefix);
return filepath;
}
// ----------------------- StackTrace ----------------------------- {{{1
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#ifdef __arm__
// Cancel Thumb bit.
@ -40,32 +32,21 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#endif
}
static void PrintStackFramePrefix(uptr frame_num, uptr pc) {
Printf(" #%zu 0x%zx", frame_num, pc);
}
static void PrintSourceLocation(const char *file, int line, int column,
const char *strip_file_prefix) {
CHECK(file);
Printf(" %s", StripPathPrefix(file, strip_file_prefix));
if (line > 0) {
Printf(":%d", line);
if (column > 0)
Printf(":%d", column);
}
}
static void PrintModuleAndOffset(const char *module, uptr offset,
const char *strip_file_prefix) {
Printf(" (%s+0x%zx)", StripPathPrefix(module, strip_file_prefix), offset);
static void PrintStackFramePrefix(InternalScopedString *buffer, uptr frame_num,
uptr pc) {
buffer->append(" #%zu 0x%zx", frame_num, pc);
}
void StackTrace::PrintStack(const uptr *addr, uptr size,
bool symbolize, const char *strip_file_prefix,
SymbolizeCallback symbolize_callback ) {
SymbolizeCallback symbolize_callback) {
if (addr == 0 || size == 0) {
Printf(" <empty stack>\n\n");
return;
}
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
InternalScopedBuffer<AddressInfo> addr_frames(64);
InternalScopedString frame_desc(GetPageSizeCached() * 2);
uptr frame_num = 0;
for (uptr i = 0; i < size && addr[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
@ -76,50 +57,60 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
if (symbolize_callback) {
if (symbolize_callback((void*)pc, buff.data(), buff.size())) {
addr_frames_num = 1;
PrintStackFramePrefix(frame_num, pc);
frame_desc.clear();
PrintStackFramePrefix(&frame_desc, frame_num, pc);
// We can't know anything about the string returned by external
// symbolizer, but if it starts with filename, try to strip path prefix
// from it.
Printf(" %s\n", StripPathPrefix(buff.data(), strip_file_prefix));
frame_desc.append(
" %s",
StripPathPrefix(buff.data(), common_flags()->strip_path_prefix));
Printf("%s\n", frame_desc.data());
frame_num++;
}
}
if (symbolize && addr_frames_num == 0 && &getSymbolizer) {
if (common_flags()->symbolize && addr_frames_num == 0) {
// Use our own (online) symbolizer, if necessary.
addr_frames_num = getSymbolizer()->SymbolizeCode(
pc, addr_frames.data(), addr_frames.size());
if (Symbolizer *sym = Symbolizer::GetOrNull())
addr_frames_num =
sym->SymbolizeCode(pc, addr_frames.data(), addr_frames.size());
for (uptr j = 0; j < addr_frames_num; j++) {
AddressInfo &info = addr_frames[j];
PrintStackFramePrefix(frame_num, pc);
frame_desc.clear();
PrintStackFramePrefix(&frame_desc, frame_num, pc);
if (info.function) {
Printf(" in %s", info.function);
frame_desc.append(" in %s", info.function);
}
if (info.file) {
PrintSourceLocation(info.file, info.line, info.column,
strip_file_prefix);
frame_desc.append(" ");
PrintSourceLocation(&frame_desc, info.file, info.line, info.column);
} else if (info.module) {
PrintModuleAndOffset(info.module, info.module_offset,
strip_file_prefix);
frame_desc.append(" ");
PrintModuleAndOffset(&frame_desc, info.module, info.module_offset);
}
Printf("\n");
info.Clear();
Printf("%s\n", frame_desc.data());
frame_num++;
info.Clear();
}
}
if (addr_frames_num == 0) {
// If online symbolization failed, try to output at least module and
// offset for instruction.
PrintStackFramePrefix(frame_num, pc);
frame_desc.clear();
PrintStackFramePrefix(&frame_desc, frame_num, pc);
uptr offset;
if (proc_maps.GetObjectNameAndOffset(pc, &offset,
buff.data(), buff.size(),
/* protection */0)) {
PrintModuleAndOffset(buff.data(), offset, strip_file_prefix);
frame_desc.append(" ");
PrintModuleAndOffset(&frame_desc, buff.data(), offset);
}
Printf("\n");
Printf("%s\n", frame_desc.data());
frame_num++;
}
}
// Always print a trailing empty line after stack trace.
Printf("\n");
}
uptr StackTrace::GetCurrentPc() {
@ -127,8 +118,13 @@ uptr StackTrace::GetCurrentPc() {
}
void StackTrace::FastUnwindStack(uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom) {
CHECK(size == 0 && trace[0] == pc);
uptr stack_top, uptr stack_bottom,
uptr max_depth) {
if (max_depth == 0) {
size = 0;
return;
}
trace[0] = pc;
size = 1;
uhwptr *frame = (uhwptr *)bp;
uhwptr *prev_frame = frame - 1;
@ -138,7 +134,7 @@ void StackTrace::FastUnwindStack(uptr pc, uptr bp,
frame < (uhwptr *)stack_top - 2 &&
frame > (uhwptr *)stack_bottom &&
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_size) {
size < max_depth) {
uhwptr pc1 = frame[1];
if (pc1 != pc) {
trace[size++] = (uptr) pc1;
@ -156,111 +152,19 @@ void StackTrace::PopStackFrames(uptr count) {
}
}
// On 32-bits we don't compress stack traces.
// On 64-bits we compress stack traces: if a given pc differes slightly from
// the previous one, we record a 31-bit offset instead of the full pc.
SANITIZER_INTERFACE_ATTRIBUTE
uptr StackTrace::CompressStack(StackTrace *stack, u32 *compressed, uptr size) {
#if SANITIZER_WORDSIZE == 32
// Don't compress, just copy.
uptr res = 0;
for (uptr i = 0; i < stack->size && i < size; i++) {
compressed[i] = stack->trace[i];
res++;
}
if (stack->size < size)
compressed[stack->size] = 0;
#else // 64 bits, compress.
uptr prev_pc = 0;
const uptr kMaxOffset = (1ULL << 30) - 1;
uptr c_index = 0;
uptr res = 0;
for (uptr i = 0, n = stack->size; i < n; i++) {
uptr pc = stack->trace[i];
if (!pc) break;
if ((s64)pc < 0) break;
// Printf("C pc[%zu] %zx\n", i, pc);
if (prev_pc - pc < kMaxOffset || pc - prev_pc < kMaxOffset) {
uptr offset = (s64)(pc - prev_pc);
offset |= (1U << 31);
if (c_index >= size) break;
// Printf("C co[%zu] offset %zx\n", i, offset);
compressed[c_index++] = offset;
} else {
uptr hi = pc >> 32;
uptr lo = (pc << 32) >> 32;
CHECK_EQ((hi & (1 << 31)), 0);
if (c_index + 1 >= size) break;
// Printf("C co[%zu] hi/lo: %zx %zx\n", c_index, hi, lo);
compressed[c_index++] = hi;
compressed[c_index++] = lo;
}
res++;
prev_pc = pc;
}
if (c_index < size)
compressed[c_index] = 0;
if (c_index + 1 < size)
compressed[c_index + 1] = 0;
#endif // SANITIZER_WORDSIZE
// debug-only code
#if 0
StackTrace check_stack;
UncompressStack(&check_stack, compressed, size);
if (res < check_stack.size) {
Printf("res %zu check_stack.size %zu; c_size %zu\n", res,
check_stack.size, size);
}
// |res| may be greater than check_stack.size, because
// UncompressStack(CompressStack(stack)) eliminates the 0x0 frames.
CHECK(res >= check_stack.size);
CHECK_EQ(0, REAL(memcmp)(check_stack.trace, stack->trace,
check_stack.size * sizeof(uptr)));
#endif
return res;
static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
}
SANITIZER_INTERFACE_ATTRIBUTE
void StackTrace::UncompressStack(StackTrace *stack,
u32 *compressed, uptr size) {
#if SANITIZER_WORDSIZE == 32
// Don't uncompress, just copy.
stack->size = 0;
for (uptr i = 0; i < size && i < kStackTraceMax; i++) {
if (!compressed[i]) break;
stack->size++;
stack->trace[i] = compressed[i];
uptr StackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
const int kPcThreshold = 192;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;
}
#else // 64 bits, uncompress
uptr prev_pc = 0;
stack->size = 0;
for (uptr i = 0; i < size && stack->size < kStackTraceMax; i++) {
u32 x = compressed[i];
uptr pc = 0;
if (x & (1U << 31)) {
// Printf("U co[%zu] offset: %x\n", i, x);
// this is an offset
s32 offset = x;
offset = (offset << 1) >> 1; // remove the 31-byte and sign-extend.
pc = prev_pc + offset;
CHECK(pc);
} else {
// CHECK(i + 1 < size);
if (i + 1 >= size) break;
uptr hi = x;
uptr lo = compressed[i+1];
// Printf("U co[%zu] hi/lo: %zx %zx\n", i, hi, lo);
i++;
pc = (hi << 32) | lo;
if (!pc) break;
}
// Printf("U pc[%zu] %zx\n", stack->size, pc);
stack->trace[stack->size++] = pc;
prev_pc = pc;
}
#endif // SANITIZER_WORDSIZE
return 0;
}
} // namespace __sanitizer

View File

@ -21,58 +21,55 @@ static const uptr kStackTraceMax = 256;
defined(__powerpc__) || defined(__powerpc64__) || \
defined(__sparc__) || \
defined(__mips__))
#define SANITIZER_CAN_FAST_UNWIND 0
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
#else
#define SANITIZER_CAN_FAST_UNWIND 1
# define SANITIZER_CAN_FAST_UNWIND 1
#endif
struct StackTrace {
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size);
uptr top_frame_bp;
uptr size;
uptr max_size;
uptr trace[kStackTraceMax];
static void PrintStack(const uptr *addr, uptr size,
bool symbolize, const char *strip_file_prefix,
SymbolizeCallback symbolize_callback);
void CopyTo(uptr *dst, uptr dst_size) {
for (uptr i = 0; i < size && i < dst_size; i++)
dst[i] = trace[i];
for (uptr i = size; i < dst_size; i++)
dst[i] = 0;
}
void CopyFrom(uptr *src, uptr src_size) {
// Prints a symbolized stacktrace, followed by an empty line.
static void PrintStack(const uptr *addr, uptr size,
SymbolizeCallback symbolize_callback = 0);
void CopyFrom(const uptr *src, uptr src_size) {
top_frame_bp = 0;
size = src_size;
if (size > kStackTraceMax) size = kStackTraceMax;
for (uptr i = 0; i < size; i++) {
for (uptr i = 0; i < size; i++)
trace[i] = src[i];
}
}
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
void SlowUnwindStack(uptr pc, uptr max_depth);
static bool WillUseFastUnwind(bool request_fast_unwind) {
// Check if fast unwind is available. Fast unwind is the only option on Mac.
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
else if (SANITIZER_MAC)
return true;
return request_fast_unwind;
}
void PopStackFrames(uptr count);
void Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
static uptr GetCurrentPc();
static uptr GetPreviousInstructionPc(uptr pc);
SANITIZER_INTERFACE_ATTRIBUTE
static uptr CompressStack(StackTrace *stack,
u32 *compressed, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
static void UncompressStack(StackTrace *stack,
u32 *compressed, uptr size);
private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
uptr max_depth);
void SlowUnwindStack(uptr pc, uptr max_depth);
void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc);
};
const char *StripPathPrefix(const char *filepath,
const char *strip_file_prefix);
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom, bool fast);
} // namespace __sanitizer
// Use this macro if you want to print stack trace with the caller

View File

@ -0,0 +1,26 @@
//===-- sanitizer_stacktrace_libcdep.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace.h"
namespace __sanitizer {
void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind) {
if (!WillUseFastUnwind(request_fast_unwind))
SlowUnwindStack(pc, max_depth);
else
FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth);
top_frame_bp = size ? bp : 0;
}
} // namespace __sanitizer

View File

@ -16,6 +16,8 @@
#include "sanitizer_stoptheworld.h"
#include "sanitizer_platform_limits_posix.h"
#include <errno.h>
#include <sched.h> // for CLONE_* definitions
#include <stddef.h>
@ -29,7 +31,16 @@
#endif
#include <sys/wait.h> // for signal-related stuff
#ifdef sa_handler
# undef sa_handler
#endif
#ifdef sa_sigaction
# undef sa_sigaction
#endif
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
@ -45,30 +56,14 @@
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We avoid the use of libc for two reasons:
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
// deadlock, if one of the treads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
// because clone() does not set up a thread-local storage for it. Any
// thread-local variables used by libc will be shared between the tracer task
// and the thread which spawned it.
//
// We deal with this by replacing libc calls with calls to our own
// implementations defined in sanitizer_libc.h and sanitizer_linux.h. However,
// there are still some libc functions which are used here:
//
// * All of the system calls ultimately go through the libc syscall() function.
// We're operating under the assumption that syscall()'s implementation does
// not acquire any locks or use any thread-local data (except for the errno
// variable, which we handle separately).
//
// * We lack custom implementations of sigfillset() and sigaction(), so we use
// the libc versions instead. The same assumptions as above apply.
//
// * It is safe to call libc functions before the cloned thread is spawned or
// after it has exited. The following functions are used in this manner:
// sigdelset()
// sigprocmask()
COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
@ -103,10 +98,11 @@ bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
&pterrno)) {
// Either the thread is dead, or something prevented us from attaching.
// Log this event and move on.
Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
if (common_flags()->verbosity)
Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
return false;
} else {
if (SanitizerVerbosity > 0)
if (common_flags()->verbosity)
Report("Attached to thread %d.\n", thread_id);
// The thread is not guaranteed to stop before ptrace returns, so we must
// wait on it.
@ -116,8 +112,9 @@ bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
if (internal_iserror(waitpid_status, &wperrno)) {
// Got a ECHILD error. I don't think this situation is possible, but it
// doesn't hurt to report it.
Report("Waiting on thread %d failed, detaching (errno %d).\n", thread_id,
wperrno);
if (common_flags()->verbosity)
Report("Waiting on thread %d failed, detaching (errno %d).\n",
thread_id, wperrno);
internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
return false;
}
@ -132,13 +129,14 @@ void ThreadSuspender::ResumeAllThreads() {
int pterrno;
if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
&pterrno)) {
if (SanitizerVerbosity > 0)
if (common_flags()->verbosity)
Report("Detached from thread %d.\n", tid);
} else {
// Either the thread is dead, or we are already detached.
// The latter case is possible, for instance, if this function was called
// from a signal handler.
Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
if (common_flags()->verbosity)
Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
}
}
}
@ -183,15 +181,16 @@ static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
struct TracerThreadArgument {
StopTheWorldCallback callback;
void *callback_argument;
// The tracer thread waits on this mutex while the parent finished its
// The tracer thread waits on this mutex while the parent finishes its
// preparations.
BlockingMutex mutex;
uptr parent_pid;
};
static DieCallbackType old_die_callback;
// Signal handler to wake up suspended threads when the tracer thread dies.
void TracerThreadSignalHandler(int signum, siginfo_t *siginfo, void *) {
void TracerThreadSignalHandler(int signum, void *siginfo, void *) {
if (thread_suspender_instance != NULL) {
if (signum == SIGABRT)
thread_suspender_instance->KillAllThreads();
@ -222,6 +221,11 @@ static int TracerThread(void* argument) {
TracerThreadArgument *tracer_thread_argument =
(TracerThreadArgument *)argument;
internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
// Check if parent is already dead.
if (internal_getppid() != tracer_thread_argument->parent_pid)
internal__exit(4);
// Wait for the parent thread to finish preparations.
tracer_thread_argument->mutex.Lock();
tracer_thread_argument->mutex.Unlock();
@ -244,17 +248,18 @@ static int TracerThread(void* argument) {
// the mask we inherited from the caller thread.
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
struct sigaction new_sigaction;
__sanitizer_kernel_sigaction_t new_sigaction;
internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
new_sigaction.sa_sigaction = TracerThreadSignalHandler;
new_sigaction.sigaction = TracerThreadSignalHandler;
new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
sigfillset(&new_sigaction.sa_mask);
sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
internal_sigfillset(&new_sigaction.sa_mask);
internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
}
int exit_code = 0;
if (!thread_suspender.SuspendAllThreads()) {
Report("Failed suspending threads.\n");
if (common_flags()->verbosity)
Report("Failed suspending threads.\n");
exit_code = 3;
} else {
tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
@ -292,44 +297,36 @@ class ScopedStackSpaceWithGuard {
uptr guard_start_;
};
NOINLINE static void WipeStack() {
char arr[256];
internal_memset(arr, 0, sizeof(arr));
}
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static sigset_t blocked_sigset;
static sigset_t old_sigset;
static struct sigaction old_sigactions[ARRAY_SIZE(kUnblockedSignals)];
static __sanitizer_kernel_sigset_t blocked_sigset;
static __sanitizer_kernel_sigset_t old_sigset;
static __sanitizer_kernel_sigaction_t old_sigactions
[ARRAY_SIZE(kUnblockedSignals)];
class StopTheWorldScope {
public:
StopTheWorldScope() {
// Glibc's sigaction() has a side-effect where it copies garbage stack
// values into oldact, which can cause false negatives in LSan. As a quick
// workaround we zero some stack space here.
WipeStack();
// Block all signals that can be blocked safely, and install
// default handlers for the remaining signals.
// We cannot allow user-defined handlers to run while the ThreadSuspender
// thread is active, because they could conceivably call some libc functions
// which modify errno (which is shared between the two threads).
sigfillset(&blocked_sigset);
internal_sigfillset(&blocked_sigset);
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
// Remove the signal from the set of blocked signals.
sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
internal_sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
// Install the default handler.
struct sigaction new_sigaction;
__sanitizer_kernel_sigaction_t new_sigaction;
internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
new_sigaction.sa_handler = SIG_DFL;
sigfillset(&new_sigaction.sa_mask);
sigaction(kUnblockedSignals[signal_index], &new_sigaction,
new_sigaction.handler = SIG_DFL;
internal_sigfillset(&new_sigaction.sa_mask);
internal_sigaction(kUnblockedSignals[signal_index], &new_sigaction,
&old_sigactions[signal_index]);
}
int sigprocmask_status =
sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail
// Make this process dumpable. Processes that are not dumpable cannot be
// attached to.
@ -347,10 +344,10 @@ class StopTheWorldScope {
// Restore the signal handlers.
for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
signal_index++) {
sigaction(kUnblockedSignals[signal_index],
internal_sigaction(kUnblockedSignals[signal_index],
&old_sigactions[signal_index], NULL);
}
sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
internal_sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
private:
@ -363,6 +360,7 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
struct TracerThreadArgument tracer_thread_argument;
tracer_thread_argument.callback = callback;
tracer_thread_argument.callback_argument = argument;
tracer_thread_argument.parent_pid = internal_getpid();
const uptr kTracerStackSize = 2 * 1024 * 1024;
ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
// Block the execution of TracerThread until after we have set ptrace
@ -375,7 +373,8 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
/* child_tidptr */);
int local_errno = 0;
if (internal_iserror(tracer_pid, &local_errno)) {
Report("Failed spawning a tracer thread (errno %d).\n", local_errno);
if (common_flags()->verbosity)
Report("Failed spawning a tracer thread (errno %d).\n", local_errno);
tracer_thread_argument.mutex.Unlock();
} else {
// On some systems we have to explicitly declare that we want to be traced
@ -390,8 +389,11 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
// At this point, any signal will either be blocked or kill us, so waitpid
// should never return (and set errno) while the tracer thread is alive.
uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
if (internal_iserror(waitpid_status, &local_errno))
Report("Waiting on the tracer thread failed (errno %d).\n", local_errno);
if (internal_iserror(waitpid_status, &local_errno)) {
if (common_flags()->verbosity)
Report("Waiting on the tracer thread failed (errno %d).\n",
local_errno);
}
}
}
@ -432,7 +434,8 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index,
int pterrno;
if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
&pterrno)) {
Report("Could not get registers from thread %d (errno %d).\n",
if (common_flags()->verbosity)
Report("Could not get registers from thread %d (errno %d).\n",
tid, pterrno);
return -1;
}

View File

@ -18,7 +18,7 @@
namespace __sanitizer {
static const char *const kTypeStrings[SuppressionTypeCount] = {
"none", "race", "mutex", "thread", "signal", "leak"
"none", "race", "mutex", "thread", "signal", "leak", "called_from_lib"
};
bool TemplateMatch(char *templ, const char *str) {
@ -127,10 +127,15 @@ void SuppressionContext::Parse(const char *str) {
}
}
uptr SuppressionContext::SuppressionCount() {
uptr SuppressionContext::SuppressionCount() const {
return suppressions_.size();
}
const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
CHECK_LT(i, suppressions_.size());
return &suppressions_[i];
}
void SuppressionContext::GetMatched(
InternalMmapVector<Suppression *> *matched) {
for (uptr i = 0; i < suppressions_.size(); i++)

View File

@ -23,6 +23,7 @@ enum SuppressionType {
SuppressionThread,
SuppressionSignal,
SuppressionLeak,
SuppressionLib,
SuppressionTypeCount
};
@ -38,7 +39,8 @@ class SuppressionContext {
SuppressionContext() : suppressions_(1), can_parse_(true) {}
void Parse(const char *str);
bool Match(const char* str, SuppressionType type, Suppression **s);
uptr SuppressionCount();
uptr SuppressionCount() const;
const Suppression *SuppressionAt(uptr i) const;
void GetMatched(InternalMmapVector<Suppression *> *matched);
private:
@ -50,7 +52,6 @@ class SuppressionContext {
const char *SuppressionTypeString(SuppressionType t);
// Exposed for testing.
bool TemplateMatch(char *templ, const char *str);
} // namespace __sanitizer

View File

@ -0,0 +1,61 @@
//===-- sanitizer_symbolizer.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
Symbolizer *Symbolizer::symbolizer_;
StaticSpinMutex Symbolizer::init_mu_;
LowLevelAllocator Symbolizer::symbolizer_allocator_;
// Returns the process-wide symbolizer, or null if neither Init() nor
// Disable() has run yet. Takes init_mu_ so the read synchronizes with
// concurrent initialization (see GetOrInit).
Symbolizer *Symbolizer::GetOrNull() {
  SpinMutexLock l(&init_mu_);
  return symbolizer_;
}
// Returns the process-wide symbolizer; aborts via RAW_CHECK_MSG if it has
// not been initialized with Init() or Disable() first.
Symbolizer *Symbolizer::Get() {
  SpinMutexLock l(&init_mu_);
  RAW_CHECK_MSG(symbolizer_ != 0, "Using uninitialized symbolizer!");
  return symbolizer_;
}
// Puts symbolization into a disabled state: installs a plain base-class
// Symbolizer, whose virtual methods are the do-nothing defaults, instead of
// a platform implementation. Must run before any other initialization
// (CHECK_EQ enforces that no symbolizer exists yet). Not thread safe.
Symbolizer *Symbolizer::Disable() {
  CHECK_EQ(0, symbolizer_);
  // Initialize a dummy symbolizer.
  symbolizer_ = new(symbolizer_allocator_) Symbolizer;
  return symbolizer_;
}
// Registers hooks that SymbolizerScope invokes before/after symbolization
// work. May be called at most once: the CHECK requires both hooks to be
// currently unset.
void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,
                          Symbolizer::EndSymbolizationHook end_hook) {
  CHECK(start_hook_ == 0 && end_hook_ == 0);
  start_hook_ = start_hook;
  end_hook_ = end_hook;
}
Symbolizer::Symbolizer() : start_hook_(0), end_hook_(0) {}
// RAII entry into a symbolization request: runs the registered start hook
// (if any) on construction.
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
    : sym_(sym) {
  if (sym_->start_hook_)
    sym_->start_hook_();
}
// Runs the registered end hook (if any) when the symbolization scope ends.
Symbolizer::SymbolizerScope::~SymbolizerScope() {
  if (sym_->end_hook_)
    sym_->end_hook_();
}
} // namespace __sanitizer

View File

@ -5,19 +5,14 @@
//
//===----------------------------------------------------------------------===//
//
// Symbolizer is intended to be used by both
// AddressSanitizer and ThreadSanitizer to symbolize a given
// address. It is an analogue of addr2line utility and allows to map
// instruction address to a location in source code at run-time.
// Symbolizer is used by sanitizers to map instruction address to a location in
// source code at run-time. Symbolizer either uses __sanitizer_symbolize_*
// defined in the program, or (if they are missing) tries to find and
// launch "llvm-symbolizer" commandline tool in a separate process and
// communicate with it.
//
// Symbolizer is planned to use debug information (in DWARF format)
// in a binary via interface defined in "llvm/DebugInfo/DIContext.h"
//
// Symbolizer code should be called from the run-time library of
// dynamic tools, and generally should not call memory allocation
// routines or other system library functions intercepted by those tools.
// Instead, Symbolizer code should use their replacements, defined in
// "compiler-rt/lib/sanitizer_common/sanitizer_libc.h".
// Generally we should try to avoid calling system library functions during
// symbolization (and use their replacements from sanitizer_libc.h instead).
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SYMBOLIZER_H
#define SANITIZER_SYMBOLIZER_H
@ -25,7 +20,6 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
// WARNING: Do not include system headers here. See details above.
namespace __sanitizer {
@ -67,8 +61,24 @@ struct DataInfo {
uptr size;
};
class SymbolizerInterface {
class Symbolizer {
public:
/// Returns platform-specific implementation of Symbolizer. The symbolizer
/// must be initialized (with init or disable) before calling this function.
static Symbolizer *Get();
/// Returns platform-specific implementation of Symbolizer, or null if not
/// initialized.
static Symbolizer *GetOrNull();
/// Returns platform-specific implementation of Symbolizer. Will
/// automatically initialize symbolizer as if by calling Init(0) if needed.
static Symbolizer *GetOrInit();
/// Initialize and return the symbolizer, given an optional path to an
/// external symbolizer. The path argument is only required for legacy
/// reasons as this function will check $PATH for an external symbolizer. Not
/// thread safe.
static Symbolizer *Init(const char* path_to_external = 0);
/// Initialize the symbolizer in a disabled state. Not thread safe.
static Symbolizer *Disable();
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
@ -82,6 +92,9 @@ class SymbolizerInterface {
virtual bool IsAvailable() {
return false;
}
virtual bool IsExternalAvailable() {
return false;
}
// Release internal caches (if any).
virtual void Flush() {}
// Attempts to demangle the provided C++ mangled name.
@ -89,17 +102,42 @@ class SymbolizerInterface {
return name;
}
virtual void PrepareForSandboxing() {}
// Starts external symbolizer program in a subprocess. Sanitizer communicates
// with external symbolizer via pipes. If path_to_symbolizer is NULL or empty,
// tries to look for llvm-symbolizer in PATH.
virtual bool InitializeExternal(const char *path_to_symbolizer) {
return false;
}
};
// Returns platform-specific implementation of SymbolizerInterface. It can't be
// used from multiple threads simultaneously.
SANITIZER_WEAK_ATTRIBUTE SymbolizerInterface *getSymbolizer();
// Allow user to install hooks that would be called before/after Symbolizer
// does the actual file/line info fetching. Specific sanitizers may need this
// to distinguish system library calls made in user code from calls made
// during in-process symbolization.
typedef void (*StartSymbolizationHook)();
typedef void (*EndSymbolizationHook)();
// May be called at most once.
void AddHooks(StartSymbolizationHook start_hook,
EndSymbolizationHook end_hook);
private:
/// Platform-specific function for creating a Symbolizer object.
static Symbolizer *PlatformInit(const char *path_to_external);
/// Create a symbolizer and store it to symbolizer_ without checking if one
/// already exists. Not thread safe.
static Symbolizer *CreateAndStore(const char *path_to_external);
static Symbolizer *symbolizer_;
static StaticSpinMutex init_mu_;
protected:
Symbolizer();
static LowLevelAllocator symbolizer_allocator_;
StartSymbolizationHook start_hook_;
EndSymbolizationHook end_hook_;
class SymbolizerScope {
public:
explicit SymbolizerScope(const Symbolizer *sym);
~SymbolizerScope();
private:
const Symbolizer *sym_;
};
};
} // namespace __sanitizer

View File

@ -0,0 +1,144 @@
//===-- sanitizer_symbolizer_libbacktrace.cc ------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// Libbacktrace implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_symbolizer_libbacktrace.h"
#if SANITIZER_LIBBACKTRACE
# include "backtrace-supported.h"
# if SANITIZER_POSIX && BACKTRACE_SUPPORTED && !BACKTRACE_USES_MALLOC
# include "backtrace.h"
# else
# define SANITIZER_LIBBACKTRACE 0
# endif
#endif
namespace __sanitizer {
#if SANITIZER_LIBBACKTRACE
namespace {
struct SymbolizeCodeData {
AddressInfo *frames;
uptr n_frames;
uptr max_frames;
const char *module_name;
uptr module_offset;
};
extern "C" {
// backtrace_pcinfo callback: appends one AddressInfo per resolved frame
// (backtrace_pcinfo reports inlined frames as repeated calls). Frames
// without a function name are skipped. Returns 1 to stop iteration once
// max_frames entries have been filled, 0 to continue.
static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
                                       const char *filename, int lineno,
                                       const char *function) {
  SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata;
  if (function) {
    AddressInfo *info = &cdata->frames[cdata->n_frames++];
    info->Clear();
    info->FillAddressAndModuleInfo(addr, cdata->module_name,
                                   cdata->module_offset);
    // internal_strdup: keep our own copies of the libbacktrace strings.
    info->function = internal_strdup(function);
    if (filename)
      info->file = internal_strdup(filename);
    info->line = lineno;
    if (cdata->n_frames == cdata->max_frames)
      return 1;
  }
  return 0;
}
// backtrace_syminfo callback, used by SymbolizeCode as a fallback when
// backtrace_pcinfo produced no frames: records a single frame carrying only
// the symbol name (no file/line information).
static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
                                  const char *symname, uintptr_t, uintptr_t) {
  SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata;
  if (symname) {
    AddressInfo *info = &cdata->frames[0];
    info->Clear();
    info->FillAddressAndModuleInfo(addr, cdata->module_name,
                                   cdata->module_offset);
    info->function = internal_strdup(symname);
    cdata->n_frames = 1;
  }
}
// backtrace_syminfo callback for data symbols: fills the DataInfo with the
// symbol's name, start address and size. Leaves the DataInfo untouched if
// either the name or the start address is missing.
static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,
                                  uintptr_t symval, uintptr_t symsize) {
  DataInfo *info = (DataInfo *)vdata;
  if (symname && symval) {
    info->name = internal_strdup(symname);
    info->start = symval;
    info->size = symsize;
  }
}
static void ErrorCallback(void *, const char *, int) {}
} // extern "C"
} // namespace
// Factory: creates a libbacktrace state for the current executable
// ("/proc/self/exe") and wraps it. Returns null if backtrace_create_state
// fails, i.e. libbacktrace is unusable here.
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
  // State created in backtrace_create_state is leaked.
  void *state = (void *)(backtrace_create_state("/proc/self/exe", 0,
                                                ErrorCallback, NULL));
  if (!state)
    return 0;
  return new(*alloc) LibbacktraceSymbolizer(state);
}
// Fills up to max_frames entries of `frames` for `addr`. First tries
// backtrace_pcinfo (file/line plus inlined frames); if that yields nothing,
// falls back to backtrace_syminfo (symbol name only). Returns the number of
// frames filled (0 if both lookups failed).
uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
                                           uptr max_frames,
                                           const char *module_name,
                                           uptr module_offset) {
  SymbolizeCodeData data;
  data.frames = frames;
  data.n_frames = 0;
  data.max_frames = max_frames;
  data.module_name = module_name;
  data.module_offset = module_offset;
  backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback,
                   ErrorCallback, &data);
  if (data.n_frames)
    return data.n_frames;
  backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback,
                    ErrorCallback, &data);
  return data.n_frames;
}
// Looks up the data symbol containing info->address via backtrace_syminfo
// and fills name/start/size. Always reports success; the fields simply stay
// unset if the lookup found nothing.
bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
  backtrace_syminfo((backtrace_state *)state_, info->address,
                    SymbolizeDataCallback, ErrorCallback, info);
  return true;
}
#else // SANITIZER_LIBBACKTRACE
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
return 0;
}
uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
uptr max_frames,
const char *module_name,
uptr module_offset) {
(void)state_;
return 0;
}
bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
return false;
}
#endif // SANITIZER_LIBBACKTRACE
} // namespace __sanitizer

View File

@ -0,0 +1,38 @@
//===-- sanitizer_symbolizer_libbacktrace.h -------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// Header for libbacktrace symbolizer.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#include "sanitizer_common.h"
#include "sanitizer_symbolizer.h"
#ifndef SANITIZER_LIBBACKTRACE
# define SANITIZER_LIBBACKTRACE 0
#endif
namespace __sanitizer {
// Thin wrapper around the libbacktrace library. When SANITIZER_LIBBACKTRACE
// is 0 the implementation is stubbed out and get() returns null.
class LibbacktraceSymbolizer {
 public:
  // Returns a new instance allocated from `alloc`, or null if libbacktrace
  // could not be initialized (or is compiled out).
  static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);

  // Fills up to `max_frames` entries of `frames` for `addr`; returns the
  // number of frames filled.
  uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames,
                     const char *module_name, uptr module_offset);

  // Fills `info` for the data symbol containing info->address.
  bool SymbolizeData(DataInfo *info);

 private:
  explicit LibbacktraceSymbolizer(void *state) : state_(state) {}

  void *state_;  // Leaked.
};
} // namespace __sanitizer

View File

@ -0,0 +1,37 @@
//===-- sanitizer_symbolizer_libcdep.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
// Creates the platform symbolizer and stores it in symbolizer_ without
// checking whether one already exists. Falls back to Disable() — a no-op
// base-class symbolizer — if PlatformInit() fails. Not thread safe by
// itself; GetOrInit() serializes calls under init_mu_.
Symbolizer *Symbolizer::CreateAndStore(const char *path_to_external) {
  Symbolizer *platform_symbolizer = PlatformInit(path_to_external);
  if (!platform_symbolizer)
    return Disable();
  symbolizer_ = platform_symbolizer;
  return platform_symbolizer;
}
// Explicitly initializes the global symbolizer; must be the first
// initialization (CHECK_EQ requires symbolizer_ == 0). path_to_external
// optionally names an external symbolizer binary. Not thread safe.
Symbolizer *Symbolizer::Init(const char *path_to_external) {
  CHECK_EQ(0, symbolizer_);
  return CreateAndStore(path_to_external);
}
// Returns the global symbolizer, lazily creating a default one (no explicit
// external-symbolizer path) on first use. The whole check-and-create runs
// under init_mu_, so concurrent first calls are safe.
Symbolizer *Symbolizer::GetOrInit() {
  SpinMutexLock l(&init_mu_);
  if (symbolizer_ == 0)
    return CreateAndStore(0);
  return symbolizer_;
}
} // namespace __sanitizer

View File

@ -19,6 +19,7 @@
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_symbolizer_libbacktrace.h"
#include <errno.h>
#include <stdlib.h>
@ -203,19 +204,49 @@ static const char *ExtractUptr(const char *str, const char *delims,
// <file_name>:<line_number>:<column_number>
// ...
// <empty line>
// ExternalSymbolizer may not be used from two threads simultaneously.
class ExternalSymbolizer {
public:
ExternalSymbolizer(const char *path, int input_fd, int output_fd)
explicit ExternalSymbolizer(const char *path)
: path_(path),
input_fd_(input_fd),
output_fd_(output_fd),
times_restarted_(0) {
input_fd_(kInvalidFd),
output_fd_(kInvalidFd),
times_restarted_(0),
failed_to_start_(false) {
CHECK(path_);
CHECK_NE(input_fd_, kInvalidFd);
CHECK_NE(output_fd_, kInvalidFd);
CHECK_NE(path[0], '\0');
}
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
// Start or restart symbolizer if we failed to send command to it.
if (char *res = SendCommandImpl(is_data, module_name, module_offset))
return res;
Restart();
}
if (!failed_to_start_) {
Report("WARNING: Failed to use and restart external symbolizer!\n");
failed_to_start_ = true;
}
return 0;
}
void Flush() {
}
private:
bool Restart() {
if (input_fd_ != kInvalidFd)
internal_close(input_fd_);
if (output_fd_ != kInvalidFd)
internal_close(output_fd_);
return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
}
char *SendCommandImpl(bool is_data, const char *module_name,
uptr module_offset) {
if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
return 0;
CHECK(module_name);
internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
is_data ? "DATA " : "", module_name, module_offset);
@ -226,18 +257,6 @@ class ExternalSymbolizer {
return buffer_;
}
bool Restart() {
if (times_restarted_ >= kMaxTimesRestarted) return false;
times_restarted_++;
internal_close(input_fd_);
internal_close(output_fd_);
return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_);
}
void Flush() {
}
private:
bool readFromSymbolizer(char *buffer, uptr max_length) {
if (max_length == 0)
return true;
@ -281,10 +300,9 @@ class ExternalSymbolizer {
static const uptr kMaxTimesRestarted = 5;
uptr times_restarted_;
bool failed_to_start_;
};
static LowLevelAllocator symbolizer_allocator; // Linker initialized.
#if SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
@ -304,11 +322,10 @@ class InternalSymbolizer {
public:
typedef bool (*SanitizerSymbolizeFn)(const char*, u64, char*, int);
static InternalSymbolizer *get() {
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
if (__sanitizer_symbolize_code != 0 &&
__sanitizer_symbolize_data != 0) {
void *mem = symbolizer_allocator.Allocate(sizeof(InternalSymbolizer));
return new(mem) InternalSymbolizer();
return new(*alloc) InternalSymbolizer();
}
return 0;
}
@ -355,7 +372,7 @@ class InternalSymbolizer {
class InternalSymbolizer {
public:
static InternalSymbolizer *get() { return 0; }
static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
return 0;
}
@ -365,11 +382,18 @@ class InternalSymbolizer {
#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
class Symbolizer : public SymbolizerInterface {
// This class has no constructor, as global constructors are forbidden in
// sanitizer_common. It should be linker initialized instead.
class POSIXSymbolizer : public Symbolizer {
public:
POSIXSymbolizer(ExternalSymbolizer *external_symbolizer,
InternalSymbolizer *internal_symbolizer,
LibbacktraceSymbolizer *libbacktrace_symbolizer)
: Symbolizer(),
external_symbolizer_(external_symbolizer),
internal_symbolizer_(internal_symbolizer),
libbacktrace_symbolizer_(libbacktrace_symbolizer) {}
uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) {
BlockingMutexLock l(&mu_);
if (max_frames == 0)
return 0;
LoadedModule *module = FindModuleForAddress(addr);
@ -377,9 +401,17 @@ class Symbolizer : public SymbolizerInterface {
return 0;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
// First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
uptr res = libbacktrace_symbolizer_->SymbolizeCode(
addr, frames, max_frames, module_name, module_offset);
if (res > 0)
return res;
}
const char *str = SendCommand(false, module_name, module_offset);
if (str == 0) {
// External symbolizer was not initialized or failed. Fill only data
// Symbolizer was not initialized or failed. Fill only data
// about module name and offset.
AddressInfo *info = &frames[0];
info->Clear();
@ -430,6 +462,7 @@ class Symbolizer : public SymbolizerInterface {
}
bool SymbolizeData(uptr addr, DataInfo *info) {
BlockingMutexLock l(&mu_);
LoadedModule *module = FindModuleForAddress(addr);
if (module == 0)
return false;
@ -439,6 +472,11 @@ class Symbolizer : public SymbolizerInterface {
info->address = addr;
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
if (libbacktrace_symbolizer_->SymbolizeData(info))
return true;
}
const char *str = SendCommand(true, module_name, module_offset);
if (str == 0)
return true;
@ -449,42 +487,38 @@ class Symbolizer : public SymbolizerInterface {
return true;
}
bool InitializeExternal(const char *path_to_symbolizer) {
if (!path_to_symbolizer || path_to_symbolizer[0] == '\0') {
path_to_symbolizer = FindPathToBinary("llvm-symbolizer");
if (!path_to_symbolizer)
return false;
}
int input_fd, output_fd;
if (!StartSymbolizerSubprocess(path_to_symbolizer, &input_fd, &output_fd))
return false;
void *mem = symbolizer_allocator.Allocate(sizeof(ExternalSymbolizer));
external_symbolizer_ = new(mem) ExternalSymbolizer(path_to_symbolizer,
input_fd, output_fd);
return true;
bool IsAvailable() {
return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
libbacktrace_symbolizer_ != 0;
}
bool IsAvailable() {
if (internal_symbolizer_ == 0)
internal_symbolizer_ = InternalSymbolizer::get();
return internal_symbolizer_ || external_symbolizer_;
bool IsExternalAvailable() {
return external_symbolizer_ != 0;
}
void Flush() {
if (internal_symbolizer_)
BlockingMutexLock l(&mu_);
if (internal_symbolizer_ != 0) {
SymbolizerScope sym_scope(this);
internal_symbolizer_->Flush();
if (external_symbolizer_)
}
if (external_symbolizer_ != 0)
external_symbolizer_->Flush();
}
const char *Demangle(const char *name) {
if (IsAvailable() && internal_symbolizer_ != 0)
BlockingMutexLock l(&mu_);
// Run hooks even if we don't use internal symbolizer, as cxxabi
// demangle may call system functions.
SymbolizerScope sym_scope(this);
if (internal_symbolizer_ != 0)
return internal_symbolizer_->Demangle(name);
return DemangleCXXABI(name);
}
void PrepareForSandboxing() {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
BlockingMutexLock l(&mu_);
// Cache /proc/self/exe on Linux.
CacheBinaryName();
#endif
@ -492,41 +526,26 @@ class Symbolizer : public SymbolizerInterface {
private:
char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
mu_.CheckLocked();
// First, try to use internal symbolizer.
if (!IsAvailable()) {
return 0;
}
if (internal_symbolizer_) {
SymbolizerScope sym_scope(this);
return internal_symbolizer_->SendCommand(is_data, module_name,
module_offset);
}
// Otherwise, fall back to external symbolizer.
if (external_symbolizer_ == 0) {
ReportExternalSymbolizerError(
"WARNING: Trying to symbolize code, but external "
"symbolizer is not initialized!\n");
return 0;
}
for (;;) {
char *reply = external_symbolizer_->SendCommand(is_data, module_name,
module_offset);
if (reply)
return reply;
// Try to restart symbolizer subprocess. If we don't succeed, forget
// about it and don't try to use it later.
if (!external_symbolizer_->Restart()) {
ReportExternalSymbolizerError(
"WARNING: Failed to use and restart external symbolizer!\n");
external_symbolizer_ = 0;
return 0;
}
if (external_symbolizer_) {
return external_symbolizer_->SendCommand(is_data, module_name,
module_offset);
}
return 0;
}
LoadedModule *FindModuleForAddress(uptr address) {
mu_.CheckLocked();
bool modules_were_reloaded = false;
if (modules_ == 0 || !modules_fresh_) {
modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
modules_ = (LoadedModule*)(symbolizer_allocator_.Allocate(
kMaxNumberOfModuleContexts * sizeof(LoadedModule)));
CHECK(modules_);
n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts,
@ -553,41 +572,40 @@ class Symbolizer : public SymbolizerInterface {
return 0;
}
void ReportExternalSymbolizerError(const char *msg) {
// Don't use atomics here for now, as SymbolizeCode can't be called
// from multiple threads anyway.
static bool reported;
if (!reported) {
Report(msg);
reported = true;
}
}
// 16K loaded modules should be enough for everyone.
static const uptr kMaxNumberOfModuleContexts = 1 << 14;
LoadedModule *modules_; // Array of module descriptions is leaked.
uptr n_modules_;
// If stale, need to reload the modules before looking up addresses.
bool modules_fresh_;
BlockingMutex mu_;
ExternalSymbolizer *external_symbolizer_; // Leaked.
InternalSymbolizer *internal_symbolizer_; // Leaked.
ExternalSymbolizer *external_symbolizer_; // Leaked.
InternalSymbolizer *const internal_symbolizer_; // Leaked.
LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
};
static ALIGNED(64) char symbolizer_placeholder[sizeof(Symbolizer)];
static Symbolizer *symbolizer;
Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
InternalSymbolizer* internal_symbolizer =
InternalSymbolizer::get(&symbolizer_allocator_);
ExternalSymbolizer *external_symbolizer = 0;
LibbacktraceSymbolizer *libbacktrace_symbolizer = 0;
SymbolizerInterface *getSymbolizer() {
static atomic_uint8_t initialized;
static StaticSpinMutex init_mu;
if (atomic_load(&initialized, memory_order_acquire) == 0) {
SpinMutexLock l(&init_mu);
if (atomic_load(&initialized, memory_order_relaxed) == 0) {
symbolizer = new(symbolizer_placeholder) Symbolizer();
atomic_store(&initialized, 1, memory_order_release);
if (!internal_symbolizer) {
libbacktrace_symbolizer =
LibbacktraceSymbolizer::get(&symbolizer_allocator_);
if (!libbacktrace_symbolizer) {
// Find path to llvm-symbolizer if it's not provided.
if (!path_to_external)
path_to_external = FindPathToBinary("llvm-symbolizer");
if (path_to_external && path_to_external[0] != '\0')
external_symbolizer = new(symbolizer_allocator_)
ExternalSymbolizer(path_to_external);
}
}
return symbolizer;
return new(symbolizer_allocator_) POSIXSymbolizer(
external_symbolizer, internal_symbolizer, libbacktrace_symbolizer);
}
} // namespace __sanitizer

View File

@ -12,16 +12,11 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
static SymbolizerInterface win_symbolizer; // Linker initialized.
SymbolizerInterface *getSymbolizer() {
return &win_symbolizer;
}
Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) { return 0; }
} // namespace __sanitizer

View File

@ -11,7 +11,8 @@
static uptr internal_syscall(u64 nr) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11");
asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11",
"memory", "cc");
return retval;
}
@ -19,7 +20,7 @@ template <typename T1>
static uptr internal_syscall(u64 nr, T1 arg1) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1) :
"rcx", "r11");
"rcx", "r11", "memory", "cc");
return retval;
}
@ -27,7 +28,7 @@ template <typename T1, typename T2>
static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
"S"((u64)arg2) : "rcx", "r11");
"S"((u64)arg2) : "rcx", "r11", "memory", "cc");
return retval;
}
@ -35,7 +36,7 @@ template <typename T1, typename T2, typename T3>
static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3) {
u64 retval;
asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
"S"((u64)arg2), "d"((u64)arg3) : "rcx", "r11");
"S"((u64)arg2), "d"((u64)arg3) : "rcx", "r11", "memory", "cc");
return retval;
}
@ -45,7 +46,7 @@ static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4) {
asm volatile("mov %5, %%r10;"
"syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
"S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4) :
"rcx", "r11", "r10");
"rcx", "r11", "r10", "memory", "cc");
return retval;
}
@ -57,7 +58,7 @@ static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
"mov %6, %%r8;"
"syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
"S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5) :
"rcx", "r11", "r10", "r8");
"rcx", "r11", "r10", "r8", "memory", "cc");
return retval;
}
@ -71,7 +72,8 @@ static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
"mov %7, %%r9;"
"syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
"S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5),
"r"((u64)arg6) : "rcx", "r11", "r10", "r8", "r9");
"r"((u64)arg6) : "rcx", "r11", "r10", "r8", "r9",
"memory", "cc");
return retval;
}

View File

@ -198,6 +198,18 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
tctx->SetName(name);
}
void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
BlockingMutexLock l(&mtx_);
for (u32 tid = 0; tid < n_contexts_; tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && tctx->user_id == user_id &&
tctx->status != ThreadStatusInvalid) {
tctx->SetName(name);
return;
}
}
}
void ThreadRegistry::DetachThread(u32 tid) {
BlockingMutexLock l(&mtx_);
CHECK_LT(tid, n_contexts_);

View File

@ -107,6 +107,7 @@ class ThreadRegistry {
ThreadContextBase *FindThreadContextByOsIDLocked(uptr os_id);
void SetThreadName(u32 tid, const char *name);
void SetThreadNameByUserId(uptr user_id, const char *name);
void DetachThread(u32 tid);
void JoinThread(u32 tid, void *arg);
void FinishThread(u32 tid);

View File

@ -213,7 +213,7 @@ u64 NanoTime() {
void Abort() {
abort();
_exit(-1); // abort is not NORETURN on Windows.
internal__exit(-1); // abort is not NORETURN on Windows.
}
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
@ -303,7 +303,7 @@ uptr internal_sched_yield() {
}
void internal__exit(int exitcode) {
_exit(exitcode);
ExitProcess(exitcode);
}
// ---------------------- BlockingMutex ---------------- {{{1
@ -374,31 +374,15 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom, bool fast) {
(void)fast;
(void)stack_top;
(void)stack_bottom;
stack->max_size = max_s;
void *tmp[kStackTraceMax];
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0);
uptr offset = 0;
size = CaptureStackBackTrace(2, Min(max_depth, kStackTraceMax),
(void**)trace, 0);
// Skip the RTL frames by searching for the PC in the stacktrace.
// FIXME: this doesn't work well for the malloc/free stacks yet.
for (uptr i = 0; i < cs_ret; i++) {
if (pc != (uptr)tmp[i])
continue;
offset = i;
break;
}
stack->size = cs_ret - offset;
for (uptr i = 0; i < stack->size; i++)
stack->trace[i] = (uptr)tmp[i + offset];
uptr pc_location = LocatePcInTrace(pc);
PopStackFrames(pc_location);
}
void MaybeOpenReportFile() {

View File

@ -23,6 +23,7 @@ tsan_files = \
tsan_rtl.cc \
tsan_stat.cc \
tsan_sync.cc \
tsan_ignoreset.cc \
tsan_interceptors.cc \
tsan_md5.cc \
tsan_platform_mac.cc \

View File

@ -84,12 +84,12 @@ libtsan_la_DEPENDENCIES = \
am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
tsan_report.lo tsan_rtl_thread.lo tsan_symbolize.lo \
tsan_flags.lo tsan_interface.lo tsan_platform_linux.lo \
tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_interceptors.lo \
tsan_md5.lo tsan_platform_mac.lo tsan_rtl_mutex.lo \
tsan_suppressions.lo tsan_interface_ann.lo tsan_mman.lo \
tsan_rtl_report.lo tsan_fd.lo tsan_interface_java.lo \
tsan_mutexset.lo tsan_symbolize_addr2line_linux.lo \
tsan_rtl_amd64.lo
tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_ignoreset.lo \
tsan_interceptors.lo tsan_md5.lo tsan_platform_mac.lo \
tsan_rtl_mutex.lo tsan_suppressions.lo tsan_interface_ann.lo \
tsan_mman.lo tsan_rtl_report.lo tsan_fd.lo \
tsan_interface_java.lo tsan_mutexset.lo \
tsan_symbolize_addr2line_linux.lo tsan_rtl_amd64.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@ -283,6 +283,7 @@ tsan_files = \
tsan_rtl.cc \
tsan_stat.cc \
tsan_sync.cc \
tsan_ignoreset.cc \
tsan_interceptors.cc \
tsan_md5.cc \
tsan_platform_mac.cc \
@ -417,6 +418,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_clock.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_fd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_ignoreset.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_ann.Plo@am__quote@

View File

@ -39,10 +39,8 @@ const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 42;
#ifndef TSAN_GO
const int kShadowStackSize = 4 * 1024;
const int kTraceStackSize = 256;
#endif
const uptr kShadowStackSize = 64 * 1024;
const uptr kTraceStackSize = 256;
#ifdef TSAN_SHADOW_COUNT
# if TSAN_SHADOW_COUNT == 2 \
@ -154,6 +152,7 @@ struct MD5Hash {
MD5Hash md5_hash(const void *data, uptr size);
struct ThreadState;
class ThreadContext;
struct Context;
struct ReportStack;
class ReportDesc;

View File

@ -40,6 +40,11 @@ struct FdContext {
static FdContext fdctx;
static bool bogusfd(int fd) {
// Apparently a bogus fd value.
return fd < 0 || fd >= kTableSize;
}
static FdSync *allocsync() {
FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
atomic_store(&s->rc, 1, memory_order_relaxed);
@ -67,6 +72,7 @@ static void unref(ThreadState *thr, uptr pc, FdSync *s) {
}
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
CHECK_GE(fd, 0);
CHECK_LT(fd, kTableSize);
atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
uptr l1 = atomic_load(pl1, memory_order_consume);
@ -146,6 +152,8 @@ bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
}
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
@ -155,6 +163,8 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
}
void FdRelease(ThreadState *thr, uptr pc, int fd) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
@ -165,12 +175,16 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
void FdAccess(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
void FdClose(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
// To catch races between fd usage and close.
MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
@ -185,11 +199,15 @@ void FdClose(ThreadState *thr, uptr pc, int fd) {
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, &fdctx.filesync);
}
void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
if (bogusfd(oldfd) || bogusfd(newfd))
return;
// Ignore the case when user dups not yet connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
MemoryRead(thr, pc, (uptr)od, kSizeLog8);
@ -207,32 +225,44 @@ void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, allocsync());
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, 0);
}
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, 0);
}
void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, allocsync());
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
// It can be a UDP socket.
init(thr, pc, fd, &fdctx.socksync);
}
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
if (bogusfd(fd))
return;
// Synchronize connect->accept.
Acquire(thr, pc, (uptr)&fdctx.connectsync);
init(thr, pc, newfd, &fdctx.socksync);
@ -240,12 +270,16 @@ void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
// Synchronize connect->accept.
Release(thr, pc, (uptr)&fdctx.connectsync);
}
void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
init(thr, pc, fd, &fdctx.socksync);
}

View File

@ -24,13 +24,43 @@ Flags *flags() {
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
void OverrideFlags(Flags *f);
extern "C" const char* __tsan_default_options();
#else
SANITIZER_INTERFACE_ATTRIBUTE
void WEAK OverrideFlags(Flags *f) {
(void)f;
}
extern "C" const char *WEAK __tsan_default_options() {
return "";
}
#endif
static void ParseFlags(Flags *f, const char *env) {
ParseFlag(env, &f->enable_annotations, "enable_annotations");
ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
ParseFlag(env, &f->suppress_java, "suppress_java");
ParseFlag(env, &f->report_bugs, "report_bugs");
ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
ParseFlag(env, &f->suppressions, "suppressions");
ParseFlag(env, &f->print_suppressions, "print_suppressions");
ParseFlag(env, &f->print_benign, "print_benign");
ParseFlag(env, &f->exitcode, "exitcode");
ParseFlag(env, &f->halt_on_error, "halt_on_error");
ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
ParseFlag(env, &f->profile_memory, "profile_memory");
ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb");
ParseFlag(env, &f->stop_on_start, "stop_on_start");
ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind");
ParseFlag(env, &f->history_size, "history_size");
ParseFlag(env, &f->io_sync, "io_sync");
}
void InitializeFlags(Flags *f, const char *env) {
internal_memset(f, 0, sizeof(*f));
@ -45,57 +75,35 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = true;
f->report_atomic_races = true;
f->force_seq_cst_atomics = false;
f->strip_path_prefix = "";
f->suppressions = "";
f->print_suppressions = false;
f->print_benign = false;
f->exitcode = 66;
f->halt_on_error = false;
f->log_path = "stderr";
f->atexit_sleep_ms = 1000;
f->verbosity = 0;
f->profile_memory = "";
f->flush_memory_ms = 0;
f->flush_symbolizer_ms = 5000;
f->memory_limit_mb = 0;
f->stop_on_start = false;
f->running_on_valgrind = false;
f->external_symbolizer_path = "";
f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
f->io_sync = 1;
f->allocator_may_return_null = false;
SetCommonFlagsDefaults(f);
// Let a frontend override.
OverrideFlags(f);
ParseFlags(f, __tsan_default_options());
ParseCommonFlagsFromString(f, __tsan_default_options());
// Override from command line.
ParseFlag(env, &f->enable_annotations, "enable_annotations");
ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
ParseFlag(env, &f->suppress_java, "suppress_java");
ParseFlag(env, &f->report_bugs, "report_bugs");
ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe");
ParseFlag(env, &f->report_atomic_races, "report_atomic_races");
ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics");
ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(env, &f->suppressions, "suppressions");
ParseFlag(env, &f->print_suppressions, "print_suppressions");
ParseFlag(env, &f->print_benign, "print_benign");
ParseFlag(env, &f->exitcode, "exitcode");
ParseFlag(env, &f->halt_on_error, "halt_on_error");
ParseFlag(env, &f->log_path, "log_path");
ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
ParseFlag(env, &f->verbosity, "verbosity");
ParseFlag(env, &f->profile_memory, "profile_memory");
ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms");
ParseFlag(env, &f->stop_on_start, "stop_on_start");
ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
ParseFlag(env, &f->history_size, "history_size");
ParseFlag(env, &f->io_sync, "io_sync");
ParseFlag(env, &f->allocator_may_return_null, "allocator_may_return_null");
ParseFlags(f, env);
ParseCommonFlagsFromString(f, env);
// Copy back to common flags.
*common_flags() = *f;
// Sanity check.
if (!f->report_bugs) {
f->report_thread_leaks = false;
f->report_destroy_locked = false;
@ -113,8 +121,6 @@ void InitializeFlags(Flags *f, const char *env) {
" (must be [0..2])\n");
Die();
}
common_flags()->allocator_may_return_null = f->allocator_may_return_null;
}
} // namespace __tsan

View File

@ -18,9 +18,11 @@
// header may be included in the user code, and shouldn't include
// other headers from TSan or common sanitizer runtime.
#include "sanitizer_common/sanitizer_flags.h"
namespace __tsan {
struct Flags {
struct Flags : CommonFlags {
// Enable dynamic annotations, otherwise they are no-ops.
bool enable_annotations;
// Supress a race report if we've already output another race report
@ -46,8 +48,6 @@ struct Flags {
// If set, all atomics are effectively sequentially consistent (seq_cst),
// regardless of what user actually specified.
bool force_seq_cst_atomics;
// Strip that prefix from file paths in reports.
const char *strip_path_prefix;
// Suppressions filename.
const char *suppressions;
// Print matched suppressions at exit.
@ -58,27 +58,22 @@ struct Flags {
int exitcode;
// Exit after first reported error.
bool halt_on_error;
// Write logs to "log_path.pid".
// The special values are "stdout" and "stderr".
// The default is "stderr".
const char *log_path;
// Sleep in main thread before exiting for that many ms
// (useful to catch "at exit" races).
int atexit_sleep_ms;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// If set, periodically write memory profile to that file.
const char *profile_memory;
// Flush shadow memory every X ms.
int flush_memory_ms;
// Flush symbolizer caches every X ms.
int flush_symbolizer_ms;
// Resident memory limit in MB to aim at.
// If the process consumes more memory, then TSan will flush shadow memory.
int memory_limit_mb;
// Stops on start until __tsan_resume() is called (for debugging).
bool stop_on_start;
// Controls whether RunningOnValgrind() returns true or false.
bool running_on_valgrind;
// Path to external symbolizer.
const char *external_symbolizer_path;
// Per-thread history size, controls how many previous memory accesses
// are remembered per thread. Possible values are [0..7].
// history_size=0 amounts to 32K memory accesses. Each next value doubles
@ -90,8 +85,6 @@ struct Flags {
// 1 - reasonable level of synchronization (write->read)
// 2 - global synchronization of all IO operations
int io_sync;
// If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null;
};
Flags *flags();

View File

@ -0,0 +1,45 @@
//===-- tsan_ignoreset.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "tsan_ignoreset.h"
namespace __tsan {
const uptr IgnoreSet::kMaxSize;
IgnoreSet::IgnoreSet()
: size_() {
}
void IgnoreSet::Add(u32 stack_id) {
if (size_ == kMaxSize)
return;
for (uptr i = 0; i < size_; i++) {
if (stacks_[i] == stack_id)
return;
}
stacks_[size_++] = stack_id;
}
void IgnoreSet::Reset() {
size_ = 0;
}
uptr IgnoreSet::Size() const {
return size_;
}
u32 IgnoreSet::At(uptr i) const {
CHECK_LT(i, size_);
CHECK_LE(size_, kMaxSize);
return stacks_[i];
}
} // namespace __tsan

Some files were not shown because too many files have changed in this diff Show More