libsanitizer: merge from master

Merged revision: f58e0513dd95944b81ce7a6e7b49ba656de7d75f
This commit is contained in:
Martin Liska 2021-05-12 14:37:22 +02:00
parent 810afb0b5f
commit d0fee87e0c
153 changed files with 2538 additions and 1239 deletions

View File

@ -1,4 +1,4 @@
6e7dd1e3e1170080b76b5dcc5716bdd974343233 f58e0513dd95944b81ce7a6e7b49ba656de7d75f
The first line of this file holds the git revision number of the The first line of this file holds the git revision number of the
last merge done from the master library sources. last merge done from the master library sources.

View File

@ -476,7 +476,7 @@ struct Allocator {
return false; return false;
if (m->Beg() != addr) return false; if (m->Beg() != addr) return false;
AsanThread *t = GetCurrentThread(); AsanThread *t = GetCurrentThread();
m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack)); m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
return true; return true;
} }
@ -570,7 +570,7 @@ struct Allocator {
m->SetUsedSize(size); m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log; m->user_requested_alignment_log = user_requested_alignment_log;
m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack)); m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity = uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY); RoundDownTo(size, SHADOW_GRANULARITY);
@ -1183,6 +1183,34 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
m->lsan_tag = __lsan::kIgnored; m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess; return kIgnoreObjectSuccess;
} }
// Report each thread's pthread_create() `arg` pointer to LSan as an extra
// reachable pointer, via the InternalMmapVector<uptr> passed in `ptrs`.
//
// Look for the arg pointer of threads that have been created or are running.
// This is necessary to prevent false positive leaks due to the AsanThread
// holding the only live reference to a heap object. This can happen because
// the `pthread_create()` interceptor doesn't wait for the child thread to
// start before returning, thus losing the only live reference to the
// heap object on the stack.
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
__asan::AsanThreadContext *atctx =
reinterpret_cast<__asan::AsanThreadContext *>(tctx);
__asan::AsanThread *asan_thread = atctx->thread;
// Only threads in the Created or Running state can still hold the sole
// reference to `arg`.
// Note ThreadStatusRunning is required because there is a small window where
// the thread status switches to `ThreadStatusRunning` but the `arg` pointer
// still isn't on the stack yet.
if (atctx->status != ThreadStatusCreated &&
atctx->status != ThreadStatusRunning)
return;
uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
if (!thread_arg)
return;
// `ptrs` is an InternalMmapVector<uptr> supplied by the caller; record the
// arg pointer so the leak checker treats it as live.
auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
ptrsVec->push_back(thread_arg);
}
} // namespace __lsan } // namespace __lsan
// ---------------------- Interface ---------------- {{{1 // ---------------------- Interface ---------------- {{{1

View File

@ -44,11 +44,11 @@ void DescribeThread(AsanThreadContext *context) {
CHECK(context); CHECK(context);
asanThreadRegistry().CheckLocked(); asanThreadRegistry().CheckLocked();
// No need to announce the main thread. // No need to announce the main thread.
if (context->tid == 0 || context->announced) { if (context->tid == kMainTid || context->announced) {
return; return;
} }
context->announced = true; context->announced = true;
InternalScopedString str(1024); InternalScopedString str;
str.append("Thread %s", AsanThreadIdAndName(context).c_str()); str.append("Thread %s", AsanThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) { if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n"); str.append(" created by unknown thread\n");
@ -77,7 +77,6 @@ static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
} else if (AddrIsInLowShadow(addr)) { } else if (AddrIsInLowShadow(addr)) {
*shadow_kind = kShadowKindLow; *shadow_kind = kShadowKindLow;
} else { } else {
CHECK(0 && "Address is not in memory and not in shadow?");
return false; return false;
} }
return true; return true;
@ -126,7 +125,7 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) { static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d; Decorator d;
InternalScopedString str(4096); InternalScopedString str;
str.append("%s", d.Location()); str.append("%s", d.Location());
switch (descr.access_type) { switch (descr.access_type) {
case kAccessTypeLeft: case kAccessTypeLeft:
@ -243,7 +242,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end) else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows"; pos_descr = "underflows";
} }
InternalScopedString str(1024); InternalScopedString str;
str.append(" [%zd, %zd)", var.beg, var_end); str.append(" [%zd, %zd)", var.beg, var_end);
// Render variable name. // Render variable name.
str.append(" '"); str.append(" '");
@ -276,7 +275,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
// Global descriptions // Global descriptions
static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) { const __asan_global &g) {
InternalScopedString str(4096); InternalScopedString str;
Decorator d; Decorator d;
str.append("%s", d.Location()); str.append("%s", d.Location());
if (addr < g.beg) { if (addr < g.beg) {
@ -464,7 +463,13 @@ AddressDescription::AddressDescription(uptr addr, uptr access_size,
return; return;
} }
data.kind = kAddressKindWild; data.kind = kAddressKindWild;
addr = 0; data.wild.addr = addr;
data.wild.access_size = access_size;
}
void WildAddressDescription::Print() const {
Printf("Address %p is a wild pointer inside of access range of size %p.\n",
addr, access_size);
} }
void PrintAddressDescription(uptr addr, uptr access_size, void PrintAddressDescription(uptr addr, uptr access_size,

View File

@ -146,6 +146,13 @@ struct StackAddressDescription {
bool GetStackAddressInformation(uptr addr, uptr access_size, bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr); StackAddressDescription *descr);
// Description of an access through a "wild" pointer: an address that could
// not be matched to any known region (shadow, heap, stack, or global).
struct WildAddressDescription {
uptr addr;          // The wild address that was accessed.
uptr access_size;   // Size in bytes of the faulting access.
void Print() const; // Prints the "wild pointer" report line.
};
struct GlobalAddressDescription { struct GlobalAddressDescription {
uptr addr; uptr addr;
// Assume address is close to at most four globals. // Assume address is close to at most four globals.
@ -193,7 +200,7 @@ class AddressDescription {
HeapAddressDescription heap; HeapAddressDescription heap;
StackAddressDescription stack; StackAddressDescription stack;
GlobalAddressDescription global; GlobalAddressDescription global;
uptr addr; WildAddressDescription wild;
}; };
}; };
@ -211,7 +218,7 @@ class AddressDescription {
uptr Address() const { uptr Address() const {
switch (data.kind) { switch (data.kind) {
case kAddressKindWild: case kAddressKindWild:
return data.addr; return data.wild.addr;
case kAddressKindShadow: case kAddressKindShadow:
return data.shadow.addr; return data.shadow.addr;
case kAddressKindHeap: case kAddressKindHeap:
@ -226,7 +233,7 @@ class AddressDescription {
void Print(const char *bug_descr = nullptr) const { void Print(const char *bug_descr = nullptr) const {
switch (data.kind) { switch (data.kind) {
case kAddressKindWild: case kAddressKindWild:
Printf("Address %p is a wild pointer.\n", data.addr); data.wild.Print();
return; return;
case kAddressKindShadow: case kAddressKindShadow:
return data.shadow.Print(); return data.shadow.Print();

View File

@ -343,7 +343,8 @@ void ErrorODRViolation::Print() {
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(), Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg); global1.beg);
Printf("%s", d.Default()); Printf("%s", d.Default());
InternalScopedString g1_loc(256), g2_loc(256); InternalScopedString g1_loc;
InternalScopedString g2_loc;
PrintGlobalLocation(&g1_loc, global1); PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2); PrintGlobalLocation(&g2_loc, global2);
Printf(" [1] size=%zd '%s' %s\n", global1.size, Printf(" [1] size=%zd '%s' %s\n", global1.size,
@ -360,7 +361,7 @@ void ErrorODRViolation::Print() {
Report( Report(
"HINT: if you don't care about these errors you may set " "HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n"); "ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg(256); InternalScopedString error_msg;
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(), error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data()); MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data()); ReportErrorSummary(error_msg.data());
@ -554,7 +555,7 @@ static void PrintShadowMemoryForAddress(uptr addr) {
uptr shadow_addr = MemToShadow(addr); uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16; const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
InternalScopedString str(4096 * 8); InternalScopedString str;
str.append("Shadow bytes around the buggy address:\n"); str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) { for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row; uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;

View File

@ -65,7 +65,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
void FakeStack::Destroy(int tid) { void FakeStack::Destroy(int tid) {
PoisonAll(0); PoisonAll(0);
if (Verbosity() >= 2) { if (Verbosity() >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50); InternalScopedString str;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
NumberOfFrames(stack_size_log(), class_id)); NumberOfFrames(stack_size_log(), class_id));

View File

@ -81,7 +81,7 @@ void AsanTSDInit(void (*destructor)(void *tsd)) {
void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); } void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
static inline size_t AsanThreadMmapSize() { static inline size_t AsanThreadMmapSize() {
return RoundUpTo(sizeof(AsanThread), PAGE_SIZE); return RoundUpTo(sizeof(AsanThread), _zx_system_get_page_size());
} }
struct AsanThread::InitOptions { struct AsanThread::InitOptions {

View File

@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
} }
} }
// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case the compiler doesn't use private aliases for
// global variables (otherwise CheckODRViolationViaIndicator is used instead).
static void CheckODRViolationViaPoisoning(const Global *g) {
// If G's region is already poisoned, some other global was registered at
// the same address, which suggests a duplicate definition.
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger
// the entire redzone of the second global may be within the first global.
// Scan all previously registered globals for one starting at G's address.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
// Report only when sizes differ, unless detect_odr_violation >= 2
// (strict mode), and the name is not suppressed by the user.
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
}
// Clang provides two different ways for global variables protection: // Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former // it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to // case we may poison same symbol multiple times, that can help us to
@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
// where two globals with the same name are defined in different modules. // where two globals with the same name are defined in different modules.
if (UseODRIndicator(g)) if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g); CheckODRViolationViaIndicator(g);
else
CheckODRViolationViaPoisoning(g);
} }
if (CanPoisonMemory()) if (CanPoisonMemory())
PoisonRedZones(*g); PoisonRedZones(*g);

View File

@ -191,20 +191,11 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#include "sanitizer_common/sanitizer_common_syscalls.inc" #include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
struct ThreadStartParam {
atomic_uintptr_t t;
atomic_uintptr_t is_registered;
};
#if ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg); AsanThread *t = (AsanThread *)arg;
AsanThread *t = nullptr;
while ((t = reinterpret_cast<AsanThread *>(
atomic_load(&param->t, memory_order_acquire))) == nullptr)
internal_sched_yield();
SetCurrentThread(t); SetCurrentThread(t);
return t->ThreadStart(GetTid(), &param->is_registered); return t->ThreadStart(GetTid());
} }
INTERCEPTOR(int, pthread_create, void *thread, INTERCEPTOR(int, pthread_create, void *thread,
@ -217,9 +208,11 @@ INTERCEPTOR(int, pthread_create, void *thread,
int detached = 0; int detached = 0;
if (attr) if (attr)
REAL(pthread_attr_getdetachstate)(attr, &detached); REAL(pthread_attr_getdetachstate)(attr, &detached);
ThreadStartParam param;
atomic_store(&param.t, 0, memory_order_relaxed); u32 current_tid = GetCurrentTidOrInvalid();
atomic_store(&param.is_registered, 0, memory_order_relaxed); AsanThread *t =
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
int result; int result;
{ {
// Ignore all allocations made by pthread_create: thread stack/TLS may be // Ignore all allocations made by pthread_create: thread stack/TLS may be
@ -229,21 +222,13 @@ INTERCEPTOR(int, pthread_create, void *thread,
#if CAN_SANITIZE_LEAKS #if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler; __lsan::ScopedInterceptorDisabler disabler;
#endif #endif
result = REAL(pthread_create)(thread, attr, asan_thread_start, &param); result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
} }
if (result == 0) { if (result != 0) {
u32 current_tid = GetCurrentTidOrInvalid(); // If the thread didn't start delete the AsanThread to avoid leaking it.
AsanThread *t = // Note AsanThreadContexts never get destroyed so the AsanThreadContext
AsanThread::Create(start_routine, arg, current_tid, &stack, detached); // that was just created for the AsanThread is wasted.
atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release); t->Destroy();
// Wait until the AsanThread object is initialized and the ThreadRegistry
// entry is in "started" state. One reason for this is that after this
// interceptor exits, the child thread's stack may be the only thing holding
// the |arg| pointer. This may cause LSan to report a leak if leak checking
// happens at a point when the interceptor has already exited, but the stack
// range for the child thread is not yet known.
while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
internal_sched_yield();
} }
return result; return result;
} }

View File

@ -60,7 +60,7 @@ void InitializePlatformInterceptors();
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif #endif
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS #if SANITIZER_GLIBC || SANITIZER_SOLARIS
# define ASAN_INTERCEPT_SWAPCONTEXT 1 # define ASAN_INTERCEPT_SWAPCONTEXT 1
#else #else
# define ASAN_INTERCEPT_SWAPCONTEXT 0 # define ASAN_INTERCEPT_SWAPCONTEXT 0
@ -72,7 +72,7 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT_SIGLONGJMP 0 # define ASAN_INTERCEPT_SIGLONGJMP 0
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_GLIBC
# define ASAN_INTERCEPT___LONGJMP_CHK 1 # define ASAN_INTERCEPT___LONGJMP_CHK 1
#else #else
# define ASAN_INTERCEPT___LONGJMP_CHK 0 # define ASAN_INTERCEPT___LONGJMP_CHK 0
@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \ #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
!SANITIZER_NETBSD !SANITIZER_NETBSD
# define ASAN_INTERCEPT___CXA_THROW 1 # define ASAN_INTERCEPT___CXA_THROW 1
# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \ # define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
|| ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# else
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
# endif
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__)) # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1 # define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else # else
@ -111,7 +106,7 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT_ATEXIT 0 # define ASAN_INTERCEPT_ATEXIT 0
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_GLIBC
# define ASAN_INTERCEPT___STRDUP 1 # define ASAN_INTERCEPT___STRDUP 1
#else #else
# define ASAN_INTERCEPT___STRDUP 0 # define ASAN_INTERCEPT___STRDUP 0
@ -139,10 +134,10 @@ DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2) DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
#if !SANITIZER_MAC #if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \ #define ASAN_INTERCEPT_FUNC(name) \
do { \ do { \
if (!INTERCEPT_FUNCTION(name)) \ if (!INTERCEPT_FUNCTION(name)) \
VReport(1, "AddressSanitizer: failed to intercept '%s'\n'", #name); \ VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
} while (0) } while (0)
#define ASAN_INTERCEPT_FUNC_VER(name, ver) \ #define ASAN_INTERCEPT_FUNC_VER(name, ver) \
do { \ do { \

View File

@ -55,6 +55,7 @@ extern Elf_Dyn _DYNAMIC;
#else #else
#include <sys/ucontext.h> #include <sys/ucontext.h>
#include <link.h> #include <link.h>
extern ElfW(Dyn) _DYNAMIC[];
#endif #endif
// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in // x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
@ -84,7 +85,7 @@ bool IsSystemHeapAddress (uptr addr) { return false; }
void *AsanDoesNotSupportStaticLinkage() { void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static. // This will fail to link with -static.
return &_DYNAMIC; // defined in link.h return &_DYNAMIC;
} }
#if ASAN_PREMAP_SHADOW #if ASAN_PREMAP_SHADOW

View File

@ -72,6 +72,13 @@
// || `[0x2000000000, 0x23ffffffff]` || LowShadow || // || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem || // || `[0x0000000000, 0x1fffffffff]` || LowMem ||
// //
// Default Linux/RISCV64 Sv39 mapping:
// || `[0x1555550000, 0x3fffffffff]` || HighMem ||
// || `[0x0fffffa000, 0x1555555fff]` || HighShadow ||
// || `[0x0effffa000, 0x0fffff9fff]` || ShadowGap ||
// || `[0x0d55550000, 0x0effff9fff]` || LowShadow ||
// || `[0x0000000000, 0x0d5554ffff]` || LowMem ||
//
// Default Linux/AArch64 (39-bit VMA) mapping: // Default Linux/AArch64 (39-bit VMA) mapping:
// || `[0x2000000000, 0x7fffffffff]` || highmem || // || `[0x2000000000, 0x7fffffffff]` || highmem ||
// || `[0x1400000000, 0x1fffffffff]` || highshadow || // || `[0x1400000000, 0x1fffffffff]` || highshadow ||
@ -79,20 +86,6 @@
// || `[0x1000000000, 0x11ffffffff]` || lowshadow || // || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem || // || `[0x0000000000, 0x0fffffffff]` || lowmem ||
// //
// RISC-V has only 38 bits for task size
// Low mem size is set with kRiscv64_ShadowOffset64 in
// compiler-rt/lib/asan/asan_allocator.h and in
// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
// kRiscv64_ShadowOffset64, High mem top border is set with
// GetMaxVirtualAddress() in
// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
// Default Linux/RISCV64 Sv39/Sv48 mapping:
// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
//
// Default Linux/AArch64 (42-bit VMA) mapping: // Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem || // || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow || // || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
@ -175,10 +168,10 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G. 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kRiscv64_ShadowOffset64 = 0x20000000; static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000 static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000

View File

@ -45,7 +45,7 @@ COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[]
#endif #endif
#undef COMMENT_EXPORT #undef COMMENT_EXPORT
#else #else
#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE #define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
#endif #endif
using namespace __asan; using namespace __asan;

View File

@ -364,7 +364,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
&stack); &stack);
} }
CHECK_LE(end - beg, CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check. FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity); uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity); uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);

View File

@ -56,7 +56,7 @@ bool PlatformUnpoisonStacks() {
if (signal_stack.ss_flags != SS_ONSTACK) if (signal_stack.ss_flags != SS_ONSTACK)
return false; return false;
// Since we're on the signal altnerate stack, we cannot find the DEFAULT // Since we're on the signal alternate stack, we cannot find the DEFAULT
// stack bottom using a local variable. // stack bottom using a local variable.
uptr default_bottom, tls_addr, tls_size, stack_size; uptr default_bottom, tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr, GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,

View File

@ -62,19 +62,9 @@ static void AsanDie() {
} }
} }
static void AsanCheckFailed(const char *file, int line, const char *cond, static void CheckUnwind() {
u64 v1, u64 v2) { GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, stack.Print();
line, cond, (uptr)v1, (uptr)v2);
// Print a stack trace the first time we come here. Otherwise, we probably
// failed a CHECK during symbolization.
static atomic_uint32_t num_calls;
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
PRINT_CURRENT_STACK_CHECK();
}
Die();
} }
// -------------------------- Globals --------------------- {{{1 // -------------------------- Globals --------------------- {{{1
@ -432,7 +422,7 @@ static void AsanInitInternal() {
// Install tool-specific callbacks in sanitizer_common. // Install tool-specific callbacks in sanitizer_common.
AddDieCallback(AsanDie); AddDieCallback(AsanDie);
SetCheckFailedCallback(AsanCheckFailed); SetCheckUnwindCallback(CheckUnwind);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer); SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
__sanitizer_set_report_path(common_flags()->log_path); __sanitizer_set_report_path(common_flags()->log_path);
@ -568,7 +558,7 @@ void UnpoisonStack(uptr bottom, uptr top, const char *type) {
type, top, bottom, top - bottom, top - bottom); type, top, bottom, top - bottom, top - bottom);
return; return;
} }
PoisonShadow(bottom, top - bottom, 0); PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0);
} }
static void UnpoisonDefaultStack() { static void UnpoisonDefaultStack() {

View File

@ -54,9 +54,6 @@ u32 GetMallocContextSize();
#define GET_STACK_TRACE_FATAL_HERE \ #define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal) GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_CHECK_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
#define GET_STACK_TRACE_THREAD \ #define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true) GET_STACK_TRACE(kStackTraceMax, true)
@ -71,10 +68,4 @@ u32 GetMallocContextSize();
stack.Print(); \ stack.Print(); \
} }
#define PRINT_CURRENT_STACK_CHECK() \
{ \
GET_STACK_TRACE_CHECK_HERE; \
stack.Print(); \
}
#endif // ASAN_STACK_H #endif // ASAN_STACK_H

View File

@ -100,18 +100,27 @@ void AsanThread::Destroy() {
int tid = this->tid(); int tid = this->tid();
VReport(1, "T%d exited\n", tid); VReport(1, "T%d exited\n", tid);
malloc_storage().CommitBack(); bool was_running =
if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack(); (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
asanThreadRegistry().FinishThread(tid); if (was_running) {
FlushToDeadThreadStats(&stats_); if (AsanThread *thread = GetCurrentThread())
// We also clear the shadow on thread destruction because CHECK_EQ(this, thread);
// some code may still be executing in later TSD destructors malloc_storage().CommitBack();
// and we don't want it to have any poisoned stack. if (common_flags()->use_sigaltstack)
ClearShadowForThreadStackAndTLS(); UnsetAlternateSignalStack();
DeleteFakeStack(tid); FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
DeleteFakeStack(tid);
} else {
CHECK_NE(this, GetCurrentThread());
}
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached()); uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size); UnmapOrDie(this, size);
DTLS_Destroy(); if (was_running)
DTLS_Destroy();
} }
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
@ -219,7 +228,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
} }
void AsanThread::Init(const InitOptions *options) { void AsanThread::Init(const InitOptions *options) {
DCHECK_NE(tid(), ThreadRegistry::kUnknownTid); DCHECK_NE(tid(), kInvalidTid);
next_stack_top_ = next_stack_bottom_ = 0; next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release); atomic_store(&stack_switching_, false, memory_order_release);
CHECK_EQ(this->stack_size(), 0U); CHECK_EQ(this->stack_size(), 0U);
@ -253,12 +262,9 @@ void AsanThread::Init(const InitOptions *options) {
// SetThreadStackAndTls. // SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS #if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
thread_return_t AsanThread::ThreadStart( thread_return_t AsanThread::ThreadStart(tid_t os_id) {
tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init(); Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr); asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
if (signal_thread_is_registered)
atomic_store(signal_thread_is_registered, 1, memory_order_release);
if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
@ -285,11 +291,10 @@ thread_return_t AsanThread::ThreadStart(
AsanThread *CreateMainThread() { AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create( AsanThread *main_thread = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true); /* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread); SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid(), main_thread->ThreadStart(internal_getpid());
/* signal_thread_is_registered */ nullptr);
return main_thread; return main_thread;
} }
@ -300,9 +305,9 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
DCHECK_EQ(options, nullptr); DCHECK_EQ(options, nullptr);
uptr tls_size = 0; uptr tls_size = 0;
uptr stack_size = 0; uptr stack_size = 0;
GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_, GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_size); &tls_begin_, &tls_size);
stack_top_ = stack_bottom_ + stack_size; stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size; tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get(); dtls_ = DTLS_Get();
@ -426,7 +431,7 @@ AsanThread *GetCurrentThread() {
// address. We are not entirely sure that we have correct main thread // address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread // limits, so only do this magic on Android, and only if the found thread
// is the main thread. // is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0); AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
if (tctx && ThreadStackContainsAddress(tctx, &context)) { if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread); SetCurrentThread(tctx->thread);
return tctx->thread; return tctx->thread;
@ -463,7 +468,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) {
void EnsureMainThreadIDIsCorrect() { void EnsureMainThreadIDIsCorrect() {
AsanThreadContext *context = AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (context && (context->tid == 0)) if (context && (context->tid == kMainTid))
context->os_id = GetTid(); context->os_id = GetTid();
} }

View File

@ -28,7 +28,6 @@ struct DTLS;
namespace __asan { namespace __asan {
const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
const u32 kMaxNumberOfThreads = (1 << 22); // 4M const u32 kMaxNumberOfThreads = (1 << 22); // 4M
class AsanThread; class AsanThread;
@ -69,8 +68,7 @@ class AsanThread {
struct InitOptions; struct InitOptions;
void Init(const InitOptions *options = nullptr); void Init(const InitOptions *options = nullptr);
thread_return_t ThreadStart(tid_t os_id, thread_return_t ThreadStart(tid_t os_id);
atomic_uintptr_t *signal_thread_is_registered);
uptr stack_top(); uptr stack_top();
uptr stack_bottom(); uptr stack_bottom();
@ -132,6 +130,8 @@ class AsanThread {
void *extra_spill_area() { return &extra_spill_area_; } void *extra_spill_area() { return &extra_spill_area_; }
void *get_arg() { return arg_; }
private: private:
// NOTE: There is no AsanThread constructor. It is allocated // NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state. // via mmap() and *must* be valid in zero-initialized state.

View File

@ -134,7 +134,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg; AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t); SetCurrentThread(t);
return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); return t->ThreadStart(GetTid());
} }
INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security, INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,

View File

@ -14,8 +14,8 @@
#ifndef COMPILERRT_ASSEMBLY_H #ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H #define COMPILERRT_ASSEMBLY_H
#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__) #if defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR @ #define SEPARATOR %%
#else #else
#define SEPARATOR ; #define SEPARATOR ;
#endif #endif
@ -35,14 +35,14 @@
#define HIDDEN(name) .hidden name #define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name #define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE #define FILE_LEVEL_DIRECTIVE
#if defined(__arm__) #if defined(__arm__) || defined(__aarch64__)
#define SYMBOL_IS_FUNC(name) .type name,%function #define SYMBOL_IS_FUNC(name) .type name,%function
#else #else
#define SYMBOL_IS_FUNC(name) .type name,@function #define SYMBOL_IS_FUNC(name) .type name,@function
#endif #endif
#define CONST_SECTION .section .rodata #define CONST_SECTION .section .rodata
#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \ #if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
defined(__linux__) defined(__linux__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits #define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else #else
@ -65,6 +65,68 @@
#endif #endif
#if defined(__arm__) || defined(__aarch64__)
#define FUNC_ALIGN \
.text SEPARATOR \
.balign 16 SEPARATOR
#else
#define FUNC_ALIGN
#endif
// BTI and PAC gnu property note
#define NT_GNU_PROPERTY_TYPE_0 5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2
#if defined(__ARM_FEATURE_BTI_DEFAULT)
#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
#else
#define BTI_FLAG 0
#endif
#if __ARM_FEATURE_PAC_DEFAULT & 3
#define PAC_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_PAC
#else
#define PAC_FLAG 0
#endif
#define GNU_PROPERTY(type, value) \
.pushsection .note.gnu.property, "a" SEPARATOR \
.p2align 3 SEPARATOR \
.word 4 SEPARATOR \
.word 16 SEPARATOR \
.word NT_GNU_PROPERTY_TYPE_0 SEPARATOR \
.asciz "GNU" SEPARATOR \
.word type SEPARATOR \
.word 4 SEPARATOR \
.word value SEPARATOR \
.word 0 SEPARATOR \
.popsection
#if BTI_FLAG != 0
#define BTI_C hint #34
#define BTI_J hint #36
#else
#define BTI_C
#define BTI_J
#endif
#if (BTI_FLAG | PAC_FLAG) != 0
#define GNU_PROPERTY_BTI_PAC \
GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
#else
#define GNU_PROPERTY_BTI_PAC
#endif
#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
#define CFI_START .cfi_startproc
#define CFI_END .cfi_endproc
#else
#define CFI_START
#define CFI_END
#endif
#if defined(__arm__) #if defined(__arm__)
// Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros: // Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
@ -131,15 +193,24 @@
#define DEFINE_CODE_STATE #define DEFINE_CODE_STATE
#endif #endif
#define GLUE2(a, b) a##b #define GLUE2_(a, b) a##b
#define GLUE(a, b) GLUE2(a, b) #define GLUE(a, b) GLUE2_(a, b)
#define GLUE2(a, b) GLUE2_(a, b)
#define GLUE3_(a, b, c) a##b##c
#define GLUE3(a, b, c) GLUE3_(a, b, c)
#define GLUE4_(a, b, c, d) a##b##c##d
#define GLUE4(a, b, c, d) GLUE4_(a, b, c, d)
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name) #define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
#ifdef VISIBILITY_HIDDEN #ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \ #define DECLARE_SYMBOL_VISIBILITY(name) \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR HIDDEN(SYMBOL_NAME(name)) SEPARATOR
#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) \
HIDDEN(name) SEPARATOR
#else #else
#define DECLARE_SYMBOL_VISIBILITY(name) #define DECLARE_SYMBOL_VISIBILITY(name)
#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name)
#endif #endif
#define DEFINE_COMPILERRT_FUNCTION(name) \ #define DEFINE_COMPILERRT_FUNCTION(name) \
@ -177,6 +248,16 @@
DECLARE_FUNC_ENCODING \ DECLARE_FUNC_ENCODING \
name: name:
#define DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \
DEFINE_CODE_STATE \
FUNC_ALIGN \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
CFI_START SEPARATOR \
DECLARE_FUNC_ENCODING \
name: SEPARATOR BTI_C
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \ #define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \ .globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
@ -193,8 +274,13 @@
#ifdef __ELF__ #ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \ #define END_COMPILERRT_FUNCTION(name) \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name) .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
CFI_END SEPARATOR \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else #else
#define END_COMPILERRT_FUNCTION(name) #define END_COMPILERRT_FUNCTION(name)
#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
CFI_END
#endif #endif
#endif // COMPILERRT_ASSEMBLY_H #endif // COMPILERRT_ASSEMBLY_H

View File

@ -128,16 +128,11 @@ static void InitializeFlags() {
if (common_flags()->help) parser.PrintFlagDescriptions(); if (common_flags()->help) parser.PrintFlagDescriptions();
} }
static void HWAsanCheckFailed(const char *file, int line, const char *cond, static void CheckUnwind() {
u64 v1, u64 v2) { GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, stack.Print();
line, cond, (uptr)v1, (uptr)v2);
PRINT_CURRENT_STACK_CHECK();
Die();
} }
static constexpr uptr kMemoryUsageBufferSize = 4096;
static void HwasanFormatMemoryUsage(InternalScopedString &s) { static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList(); HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats(); auto thread_stats = thread_list.GetThreadStats();
@ -155,6 +150,8 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
} }
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
static constexpr uptr kMemoryUsageBufferSize = 4096;
static char *memory_usage_buffer = nullptr; static char *memory_usage_buffer = nullptr;
static void InitMemoryUsage() { static void InitMemoryUsage() {
@ -171,7 +168,7 @@ void UpdateMemoryUsage() {
return; return;
if (!memory_usage_buffer) if (!memory_usage_buffer)
InitMemoryUsage(); InitMemoryUsage();
InternalScopedString s(kMemoryUsageBufferSize); InternalScopedString s;
HwasanFormatMemoryUsage(s); HwasanFormatMemoryUsage(s);
internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1); internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0'; memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
@ -271,7 +268,7 @@ void __hwasan_init() {
InitializeFlags(); InitializeFlags();
// Install tool-specific callbacks in sanitizer_common. // Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(HWAsanCheckFailed); SetCheckUnwindCallback(CheckUnwind);
__sanitizer_set_report_path(common_flags()->log_path); __sanitizer_set_report_path(common_flags()->log_path);
@ -493,7 +490,7 @@ extern "C" void *__hwasan_extra_spill_area() {
} }
void __hwasan_print_memory_usage() { void __hwasan_print_memory_usage() {
InternalScopedString s(kMemoryUsageBufferSize); InternalScopedString s;
HwasanFormatMemoryUsage(s); HwasanFormatMemoryUsage(s);
Printf("%s\n", s.data()); Printf("%s\n", s.data());
} }

View File

@ -14,11 +14,12 @@
#ifndef HWASAN_H #ifndef HWASAN_H
#define HWASAN_H #define HWASAN_H
#include "hwasan_flags.h"
#include "hwasan_interface_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "hwasan_interface_internal.h"
#include "hwasan_flags.h"
#include "ubsan/ubsan_platform.h" #include "ubsan/ubsan_platform.h"
#ifndef HWASAN_CONTAINS_UBSAN #ifndef HWASAN_CONTAINS_UBSAN
@ -35,10 +36,31 @@
typedef u8 tag_t; typedef u8 tag_t;
#if defined(__x86_64__)
// Tags are done in middle bits using userspace aliasing.
constexpr unsigned kAddressTagShift = 39;
constexpr unsigned kTagBits = 3;
// The alias region is placed next to the shadow so the upper bits of all
// taggable addresses matches the upper bits of the shadow base. This shift
// value determines which upper bits must match. It has a floor of 44 since the
// shadow is always 8TB.
// TODO(morehouse): In alias mode we can shrink the shadow and use a
// simpler/faster shadow calculation.
constexpr unsigned kTaggableRegionCheckShift =
__sanitizer::Max(kAddressTagShift + kTagBits + 1U, 44U);
#else
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag. // translation and can be used to store a tag.
const unsigned kAddressTagShift = 56; constexpr unsigned kAddressTagShift = 56;
const uptr kAddressTagMask = 0xFFUL << kAddressTagShift; constexpr unsigned kTagBits = 8;
#endif // defined(__x86_64__)
// Mask for extracting tag bits from the lower 8 bits.
constexpr uptr kTagMask = (1UL << kTagBits) - 1;
// Mask for extracting tag bits from full pointers.
constexpr uptr kAddressTagMask = kTagMask << kAddressTagShift;
// Minimal alignment of the shadow base address. Determines the space available // Minimal alignment of the shadow base address. Determines the space available
// for threads and stack histories. This is an ABI constant. // for threads and stack histories. This is an ABI constant.
@ -50,7 +72,7 @@ const unsigned kRecordFPLShift = 4;
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift); const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
static inline tag_t GetTagFromPointer(uptr p) { static inline tag_t GetTagFromPointer(uptr p) {
return p >> kAddressTagShift; return (p >> kAddressTagShift) & kTagMask;
} }
static inline uptr UntagAddr(uptr tagged_addr) { static inline uptr UntagAddr(uptr tagged_addr) {
@ -105,15 +127,6 @@ void InstallAtExitHandler();
if (hwasan_inited) \ if (hwasan_inited) \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal) stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
#define GET_FATAL_STACK_TRACE_HERE \
GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
#define PRINT_CURRENT_STACK_CHECK() \
{ \
GET_FATAL_STACK_TRACE_HERE; \
stack.Print(); \
}
void HwasanTSDInit(); void HwasanTSDInit();
void HwasanTSDThreadInit(); void HwasanTSDThreadInit();

View File

@ -29,8 +29,8 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex; static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled; static atomic_uint8_t hwasan_allocator_tagging_enabled;
static const tag_t kFallbackAllocTag = 0xBB; static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static const tag_t kFallbackFreeTag = 0xBC; static constexpr tag_t kFallbackFreeTag = 0xBC;
enum RightAlignMode { enum RightAlignMode {
kRightAlignNever, kRightAlignNever,
@ -84,7 +84,8 @@ void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled, atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging); !flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.Init(common_flags()->allocator_release_to_os_interval_ms); allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
kAliasRegionStart);
for (uptr i = 0; i < sizeof(tail_magic); i++) for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag(); tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
} }
@ -148,7 +149,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
// Tagging can only be skipped when both tag_in_malloc and tag_in_free are // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
// false. When tag_in_malloc = false and tag_in_free = true malloc needs to // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
// retag to 0. // retag to 0.
if ((flags()->tag_in_malloc || flags()->tag_in_free) && if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
(flags()->tag_in_malloc || flags()->tag_in_free) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) { atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) { if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag; tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
@ -175,6 +177,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) { static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
CHECK(tagged_ptr); CHECK(tagged_ptr);
uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr); uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
if (!InTaggableRegion(tagged_uptr))
return true;
tag_t mem_tag = *reinterpret_cast<tag_t *>( tag_t mem_tag = *reinterpret_cast<tag_t *>(
MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr)))); MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1); return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
@ -187,7 +191,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
if (!PointerAndMemoryTagsMatch(tagged_ptr)) if (!PointerAndMemoryTagsMatch(tagged_ptr))
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr)); ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
void *untagged_ptr = UntagPtr(tagged_ptr); void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
? UntagPtr(tagged_ptr)
: tagged_ptr;
void *aligned_ptr = reinterpret_cast<void *>( void *aligned_ptr = reinterpret_cast<void *>(
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)); RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
Metadata *meta = Metadata *meta =
@ -219,10 +225,14 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size); Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size); internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
} }
if (flags()->tag_in_free && malloc_bisect(stack, 0) && if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) flags()->tag_in_free && malloc_bisect(stack, 0) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
// Always store full 8-bit tags on free to maximize UAF detection.
tag_t tag = t ? t->GenerateRandomTag(/*num_bits=*/8) : kFallbackFreeTag;
TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size), TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
t ? t->GenerateRandomTag() : kFallbackFreeTag); tag);
}
if (t) { if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr); allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations()) if (auto *ha = t->heap_allocations())
@ -365,7 +375,7 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
// OOM error is already taken care of by HwasanAllocate. // OOM error is already taken care of by HwasanAllocate.
return errno_ENOMEM; return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment)); CHECK(IsAligned((uptr)ptr, alignment));
*(void **)UntagPtr(memptr) = ptr; *memptr = ptr;
return 0; return 0;
} }

View File

@ -13,13 +13,15 @@
#ifndef HWASAN_ALLOCATOR_H #ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H #define HWASAN_ALLOCATOR_H
#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h" #include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h" #include "sanitizer_common/sanitizer_ring_buffer.h"
#include "hwasan_poisoning.h"
#if !defined(__aarch64__) && !defined(__x86_64__) #if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform #error Unsupported platform
@ -55,7 +57,12 @@ static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
struct AP64 { struct AP64 {
static const uptr kSpaceBeg = ~0ULL; static const uptr kSpaceBeg = ~0ULL;
#if defined(__x86_64__)
static const uptr kSpaceSize = 1ULL << kAddressTagShift;
#else
static const uptr kSpaceSize = 0x2000000000ULL; static const uptr kSpaceSize = 0x2000000000ULL;
#endif
static const uptr kMetadataSize = sizeof(Metadata); static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap; typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView; using AddressSpaceView = LocalAddressSpaceView;
@ -102,6 +109,16 @@ typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
void GetAllocatorStats(AllocatorStatCounters s); void GetAllocatorStats(AllocatorStatCounters s);
inline bool InTaggableRegion(uptr addr) {
#if defined(__x86_64__)
// Aliases are mapped next to shadow so that the upper bits match the shadow
// base.
return (addr >> kTaggableRegionCheckShift) ==
(__hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
#endif
return true;
}
} // namespace __hwasan } // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H #endif // HWASAN_ALLOCATOR_H

View File

@ -13,6 +13,7 @@
#ifndef HWASAN_CHECKS_H #ifndef HWASAN_CHECKS_H
#define HWASAN_CHECKS_H #define HWASAN_CHECKS_H
#include "hwasan_allocator.h"
#include "hwasan_mapping.h" #include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
@ -81,6 +82,8 @@ enum class AccessType { Load, Store };
template <ErrorAction EA, AccessType AT, unsigned LogSize> template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) { __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
if (!InTaggableRegion(p))
return;
uptr ptr_raw = p & ~kAddressTagMask; uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw); tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) { if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
@ -94,7 +97,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
template <ErrorAction EA, AccessType AT> template <ErrorAction EA, AccessType AT>
__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p, __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
uptr sz) { uptr sz) {
if (sz == 0) if (sz == 0 || !InTaggableRegion(p))
return; return;
tag_t ptr_tag = GetTagFromPointer(p); tag_t ptr_tag = GetTagFromPointer(p);
uptr ptr_raw = p & ~kAddressTagMask; uptr ptr_raw = p & ~kAddressTagMask;

View File

@ -12,15 +12,17 @@
/// ///
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "hwasan.h"
#include "hwasan_dynamic_shadow.h" #include "hwasan_dynamic_shadow.h"
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_posix.h"
#include <elf.h> #include <elf.h>
#include <link.h> #include <link.h>
#include "hwasan.h"
#include "hwasan_mapping.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_posix.h"
// The code in this file needs to run in an unrelocated binary. It should not // The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals. // access any external symbol, including its own non-hidden globals.
@ -117,6 +119,12 @@ namespace __hwasan {
void InitShadowGOT() {} void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) { uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
#if defined(__x86_64__)
constexpr uptr kAliasSize = 1ULL << kAddressTagShift;
constexpr uptr kNumAliases = 1ULL << kTagBits;
return MapDynamicShadowAndAliases(shadow_size_bytes, kAliasSize, kNumAliases,
RingBufferSize());
#endif
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment, return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd); kHighMemEnd);
} }

View File

@ -12,6 +12,8 @@
#ifndef HWASAN_FLAGS_H #ifndef HWASAN_FLAGS_H
#define HWASAN_FLAGS_H #define HWASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __hwasan { namespace __hwasan {
struct Flags { struct Flags {

View File

@ -72,3 +72,12 @@ HWASAN_FLAG(uptr, malloc_bisect_right, 0,
HWASAN_FLAG(bool, malloc_bisect_dump, false, HWASAN_FLAG(bool, malloc_bisect_dump, false,
"Print all allocations within [malloc_bisect_left, " "Print all allocations within [malloc_bisect_left, "
"malloc_bisect_right] range ") "malloc_bisect_right] range ")
// Exit if we fail to enable the AArch64 kernel ABI relaxation which allows
// tagged pointers in syscalls. This is the default, but being able to disable
// that behaviour is useful for running the testsuite on more platforms (the
// testsuite can run since we manually ensure any pointer arguments to syscalls
// are untagged before the call.
HWASAN_FLAG(bool, fail_without_syscall_abi, true,
"Exit if fail to request relaxed syscall ABI.")

View File

@ -221,8 +221,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie( ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
GetPageSizeCached(), "pthread_create")); GetPageSizeCached(), "pthread_create"));
*A = {callback, param}; *A = {callback, param};
int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr), int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
&HwasanThreadStartFunc, A);
return res; return res;
} }

View File

@ -1,4 +1,5 @@
#include "sanitizer_common/sanitizer_asm.h" #include "sanitizer_common/sanitizer_asm.h"
#include "builtins/assembly.h"
#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS #if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area #define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
@ -9,3 +10,5 @@
#endif #endif
NO_EXEC_STACK_DIRECTIVE NO_EXEC_STACK_DIRECTIVE
GNU_PROPERTY_BTI_PAC

View File

@ -222,6 +222,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memset(void *s, int c, uptr n); void *__hwasan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n); void *__hwasan_memmove(void *dest, const void *src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_set_error_report_callback(void (*callback)(const char *));
} // extern "C" } // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H #endif // HWASAN_INTERFACE_INTERNAL_H

View File

@ -76,6 +76,8 @@ uptr kHighShadowEnd;
uptr kHighMemStart; uptr kHighMemStart;
uptr kHighMemEnd; uptr kHighMemEnd;
uptr kAliasRegionStart; // Always 0 on non-x86.
static void PrintRange(uptr start, uptr end, const char *name) { static void PrintRange(uptr start, uptr end, const char *name) {
Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name); Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
} }
@ -119,9 +121,11 @@ void InitPrctl() {
#define PR_GET_TAGGED_ADDR_CTRL 56 #define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0) #define PR_TAGGED_ADDR_ENABLE (1UL << 0)
// Check we're running on a kernel that can use the tagged address ABI. // Check we're running on a kernel that can use the tagged address ABI.
if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 && int local_errno = 0;
errno == EINVAL) { if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
#if SANITIZER_ANDROID &local_errno) &&
local_errno == EINVAL) {
#if SANITIZER_ANDROID || defined(__x86_64__)
// Some older Android kernels have the tagged pointer ABI on // Some older Android kernels have the tagged pointer ABI on
// unconditionally, and hence don't have the tagged-addr prctl while still // unconditionally, and hence don't have the tagged-addr prctl while still
// allow the ABI. // allow the ABI.
@ -129,17 +133,20 @@ void InitPrctl() {
// case. // case.
return; return;
#else #else
Printf( if (flags()->fail_without_syscall_abi) {
"FATAL: " Printf(
"HWAddressSanitizer requires a kernel with tagged address ABI.\n"); "FATAL: "
Die(); "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
Die();
}
#endif #endif
} }
// Turn on the tagged address ABI. // Turn on the tagged address ABI.
if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
(uptr)-1 || PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
!internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) { !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &&
flags()->fail_without_syscall_abi) {
Printf( Printf(
"FATAL: HWAddressSanitizer failed to enable tagged address syscall " "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
"ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` " "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
@ -174,6 +181,18 @@ bool InitShadow() {
// High memory starts where allocated shadow allows. // High memory starts where allocated shadow allows.
kHighMemStart = ShadowToMem(kHighShadowStart); kHighMemStart = ShadowToMem(kHighShadowStart);
#if defined(__x86_64__)
constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
kAliasRegionStart =
__hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
CHECK_EQ(kAliasRegionStart >> kTaggableRegionCheckShift,
__hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
CHECK_EQ(
(kAliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
__hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
#endif
// Check the sanity of the defined memory ranges (there might be gaps). // Check the sanity of the defined memory ranges (there might be gaps).
CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0); CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
CHECK_GT(kHighMemStart, kHighShadowEnd); CHECK_GT(kHighMemStart, kHighShadowEnd);
@ -217,7 +236,9 @@ void InitThreads() {
} }
bool MemIsApp(uptr p) { bool MemIsApp(uptr p) {
#if !defined(__x86_64__) // Memory outside the alias range has non-zero tags.
CHECK(GetTagFromPointer(p) == 0); CHECK(GetTagFromPointer(p) == 0);
#endif
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd); return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
} }

View File

@ -48,6 +48,8 @@ extern uptr kHighShadowEnd;
extern uptr kHighMemStart; extern uptr kHighMemStart;
extern uptr kHighMemEnd; extern uptr kHighMemEnd;
extern uptr kAliasRegionStart;
inline uptr MemToShadow(uptr untagged_addr) { inline uptr MemToShadow(uptr untagged_addr) {
return (untagged_addr >> kShadowScale) + return (untagged_addr >> kShadowScale) +
__hwasan_shadow_memory_dynamic_address; __hwasan_shadow_memory_dynamic_address;

View File

@ -24,7 +24,7 @@ using namespace __hwasan;
void *__hwasan_memset(void *block, int c, uptr size) { void *__hwasan_memset(void *block, int c, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>( CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(block), size); reinterpret_cast<uptr>(block), size);
return memset(UntagPtr(block), c, size); return memset(block, c, size);
} }
void *__hwasan_memcpy(void *to, const void *from, uptr size) { void *__hwasan_memcpy(void *to, const void *from, uptr size) {
@ -32,7 +32,7 @@ void *__hwasan_memcpy(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(to), size); reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>( CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size); reinterpret_cast<uptr>(from), size);
return memcpy(UntagPtr(to), UntagPtr(from), size); return memcpy(to, from, size);
} }
void *__hwasan_memmove(void *to, const void *from, uptr size) { void *__hwasan_memmove(void *to, const void *from, uptr size) {

View File

@ -27,6 +27,12 @@
void *res = hwasan_malloc(size, &stack);\ void *res = hwasan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res return res
#define OPERATOR_NEW_ALIGN_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_aligned_alloc(static_cast<uptr>(align), size, &stack); \
if (!nothrow && UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res
#define OPERATOR_DELETE_BODY \ #define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \ GET_MALLOC_STACK_TRACE; \
@ -50,6 +56,7 @@ using namespace __hwasan;
// Fake std::nothrow_t to avoid including <new>. // Fake std::nothrow_t to avoid including <new>.
namespace std { namespace std {
struct nothrow_t {}; struct nothrow_t {};
enum class align_val_t : size_t {};
} // namespace std } // namespace std
@ -66,6 +73,22 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) { void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/); OPERATOR_NEW_BODY(true /*nothrow*/);
} }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
size_t size, std::align_val_t align) {
OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size, std::align_val_t align) {
OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
size_t size, std::align_val_t align, std::nothrow_t const &) {
OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
size_t size, std::align_val_t align, std::nothrow_t const &) {
OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
@ -77,5 +100,21 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) { void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY; OPERATOR_DELETE_BODY;
} }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, std::align_val_t align) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
#endif // OPERATOR_NEW_BODY #endif // OPERATOR_NEW_BODY

View File

@ -43,12 +43,16 @@ class ScopedReport {
} }
~ScopedReport() { ~ScopedReport() {
void (*report_cb)(const char *);
{ {
BlockingMutexLock lock(&error_message_lock_); BlockingMutexLock lock(&error_message_lock_);
if (fatal) report_cb = error_report_callback_;
SetAbortMessage(error_message_.data());
error_message_ptr_ = nullptr; error_message_ptr_ = nullptr;
} }
if (report_cb)
report_cb(error_message_.data());
if (fatal)
SetAbortMessage(error_message_.data());
if (common_flags()->print_module_map >= 2 || if (common_flags()->print_module_map >= 2 ||
(fatal && common_flags()->print_module_map)) (fatal && common_flags()->print_module_map))
DumpProcessMap(); DumpProcessMap();
@ -66,6 +70,12 @@ class ScopedReport {
// overwrite old trailing '\0', keep new trailing '\0' untouched. // overwrite old trailing '\0', keep new trailing '\0' untouched.
internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len); internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
} }
static void SetErrorReportCallback(void (*callback)(const char *)) {
BlockingMutexLock lock(&error_message_lock_);
error_report_callback_ = callback;
}
private: private:
ScopedErrorReportLock error_report_lock_; ScopedErrorReportLock error_report_lock_;
InternalMmapVector<char> error_message_; InternalMmapVector<char> error_message_;
@ -73,10 +83,12 @@ class ScopedReport {
static InternalMmapVector<char> *error_message_ptr_; static InternalMmapVector<char> *error_message_ptr_;
static BlockingMutex error_message_lock_; static BlockingMutex error_message_lock_;
static void (*error_report_callback_)(const char *);
}; };
InternalMmapVector<char> *ScopedReport::error_message_ptr_; InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_; BlockingMutex ScopedReport::error_message_lock_;
void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message. // If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) { void AppendToErrorMessageBuffer(const char *buffer) {
@ -212,7 +224,7 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
// We didn't find any locals. Most likely we don't have symbols, so dump // We didn't find any locals. Most likely we don't have symbols, so dump
// the information that we have for offline analysis. // the information that we have for offline analysis.
InternalScopedString frame_desc(GetPageSizeCached() * 2); InternalScopedString frame_desc;
Printf("Previously allocated frames:\n"); Printf("Previously allocated frames:\n");
for (uptr i = 0; i < frames; i++) { for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i]; const uptr *record_addr = &(*sa)[i];
@ -447,7 +459,7 @@ static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len)); RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
tag_t *beg_row = center_row_beg - row_len * (num_rows / 2); tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2); tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
InternalScopedString s(GetPageSizeCached() * 8); InternalScopedString s;
for (tag_t *row = beg_row; row < end_row; row += row_len) { for (tag_t *row = beg_row; row < end_row; row += row_len) {
s.append("%s", row == center_row_beg ? "=>" : " "); s.append("%s", row == center_row_beg ? "=>" : " ");
s.append("%p:", row); s.append("%p:", row);
@ -535,7 +547,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
GetStackTraceFromId(chunk.GetAllocStackId()).Print(); GetStackTraceFromId(chunk.GetAllocStackId()).Print();
} }
InternalScopedString s(GetPageSizeCached() * 8); InternalScopedString s;
CHECK_GT(tail_size, 0U); CHECK_GT(tail_size, 0U);
CHECK_LT(tail_size, kShadowAlignment); CHECK_LT(tail_size, kShadowAlignment);
u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size); u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
@ -650,3 +662,7 @@ void ReportRegisters(uptr *frame, uptr pc) {
} }
} // namespace __hwasan } // namespace __hwasan
void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
__hwasan::ScopedReport::SetErrorReportCallback(callback);
}

View File

@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_asm.h" #include "sanitizer_common/sanitizer_asm.h"
#include "builtins/assembly.h"
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__) #if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
#include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_platform.h"
@ -34,6 +35,7 @@
ASM_TYPE_FUNCTION(__interceptor_setjmp) ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp: __interceptor_setjmp:
CFI_STARTPROC CFI_STARTPROC
BTI_C
mov x1, #0 mov x1, #0
b __interceptor_sigsetjmp b __interceptor_sigsetjmp
CFI_ENDPROC CFI_ENDPROC
@ -46,6 +48,7 @@ ASM_SIZE(__interceptor_setjmp)
ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic) ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
__interceptor_setjmp_bionic: __interceptor_setjmp_bionic:
CFI_STARTPROC CFI_STARTPROC
BTI_C
mov x1, #1 mov x1, #1
b __interceptor_sigsetjmp b __interceptor_sigsetjmp
CFI_ENDPROC CFI_ENDPROC
@ -56,6 +59,7 @@ ASM_SIZE(__interceptor_setjmp_bionic)
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp) ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
__interceptor_sigsetjmp: __interceptor_sigsetjmp:
CFI_STARTPROC CFI_STARTPROC
BTI_C
stp x19, x20, [x0, #0<<3] stp x19, x20, [x0, #0<<3]
stp x21, x22, [x0, #2<<3] stp x21, x22, [x0, #2<<3]
stp x23, x24, [x0, #4<<3] stp x23, x24, [x0, #4<<3]
@ -98,3 +102,5 @@ ALIAS __interceptor_setjmp, _setjmp
// We do not need executable stack. // We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE NO_EXEC_STACK_DIRECTIVE
GNU_PROPERTY_BTI_PAC

View File

@ -1,4 +1,5 @@
#include "sanitizer_common/sanitizer_asm.h" #include "sanitizer_common/sanitizer_asm.h"
#include "builtins/assembly.h"
// The content of this file is AArch64-only: // The content of this file is AArch64-only:
#if defined(__aarch64__) #if defined(__aarch64__)
@ -74,6 +75,8 @@
.global __hwasan_tag_mismatch .global __hwasan_tag_mismatch
.type __hwasan_tag_mismatch, %function .type __hwasan_tag_mismatch, %function
__hwasan_tag_mismatch: __hwasan_tag_mismatch:
BTI_J
// Compute the granule position one past the end of the access. // Compute the granule position one past the end of the access.
mov x16, #1 mov x16, #1
and x17, x1, #0xf and x17, x1, #0xf
@ -106,6 +109,7 @@ __hwasan_tag_mismatch:
.type __hwasan_tag_mismatch_v2, %function .type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2: __hwasan_tag_mismatch_v2:
CFI_STARTPROC CFI_STARTPROC
BTI_J
// Set the CFA to be the return address for caller of __hwasan_check_*. Note // Set the CFA to be the return address for caller of __hwasan_check_*. Note
// that we do not emit CFI predicates to describe the contents of this stack // that we do not emit CFI predicates to describe the contents of this stack
@ -150,3 +154,5 @@ __hwasan_tag_mismatch_v2:
// We do not need executable stack. // We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE NO_EXEC_STACK_DIRECTIVE
GNU_PROPERTY_BTI_PAC

View File

@ -35,6 +35,10 @@ void Thread::InitRandomState() {
} }
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) { void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
CHECK_EQ(0, unique_id_); // try to catch bad stack reuse
CHECK_EQ(0, stack_top_);
CHECK_EQ(0, stack_bottom_);
static u64 unique_id; static u64 unique_id;
unique_id_ = unique_id++; unique_id_ = unique_id++;
if (auto sz = flags()->heap_history_size) if (auto sz = flags()->heap_history_size)
@ -113,18 +117,21 @@ static u32 xorshift(u32 state) {
} }
// Generate a (pseudo-)random non-zero tag. // Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag() { tag_t Thread::GenerateRandomTag(uptr num_bits) {
DCHECK_GT(num_bits, 0);
if (tagging_disabled_) return 0; if (tagging_disabled_) return 0;
tag_t tag; tag_t tag;
const uptr tag_mask = (1ULL << num_bits) - 1;
do { do {
if (flags()->random_tags) { if (flags()->random_tags) {
if (!random_buffer_) if (!random_buffer_)
random_buffer_ = random_state_ = xorshift(random_state_); random_buffer_ = random_state_ = xorshift(random_state_);
CHECK(random_buffer_); CHECK(random_buffer_);
tag = random_buffer_ & 0xFF; tag = random_buffer_ & tag_mask;
random_buffer_ >>= 8; random_buffer_ >>= num_bits;
} else { } else {
tag = random_state_ = (random_state_ + 1) & 0xFF; random_state_ += 1;
tag = random_state_ & tag_mask;
} }
} while (!tag); } while (!tag);
return tag; return tag;

View File

@ -42,7 +42,7 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; } HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; } StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
tag_t GenerateRandomTag(); tag_t GenerateRandomTag(uptr num_bits = kTagBits);
void DisableTagging() { tagging_disabled_++; } void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; } void EnableTagging() { tagging_disabled_--; }
@ -74,8 +74,6 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations_; HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_; StackAllocationsRingBuffer *stack_allocations_;
Thread *next_; // All live threads form a linked list.
u64 unique_id_; // counting from zero. u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread. u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.

View File

@ -66,40 +66,6 @@ static uptr RingBufferSize() {
return 0; return 0;
} }
struct ThreadListHead {
Thread *list_;
ThreadListHead() : list_(nullptr) {}
void Push(Thread *t) {
t->next_ = list_;
list_ = t;
}
Thread *Pop() {
Thread *t = list_;
if (t)
list_ = t->next_;
return t;
}
void Remove(Thread *t) {
Thread **cur = &list_;
while (*cur != t) cur = &(*cur)->next_;
CHECK(*cur && "thread not found");
*cur = (*cur)->next_;
}
template <class CB>
void ForEach(CB cb) {
Thread *t = list_;
while (t) {
cb(t);
t = t->next_;
}
}
};
struct ThreadStats { struct ThreadStats {
uptr n_live_threads; uptr n_live_threads;
uptr total_stack_size; uptr total_stack_size;
@ -120,17 +86,23 @@ class HwasanThreadList {
} }
Thread *CreateCurrentThread() { Thread *CreateCurrentThread() {
Thread *t; Thread *t = nullptr;
{ {
SpinMutexLock l(&list_mutex_); SpinMutexLock l(&free_list_mutex_);
t = free_list_.Pop(); if (!free_list_.empty()) {
if (t) { t = free_list_.back();
uptr start = (uptr)t - ring_buffer_size_; free_list_.pop_back();
internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
} else {
t = AllocThread();
} }
live_list_.Push(t); }
if (t) {
uptr start = (uptr)t - ring_buffer_size_;
internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
} else {
t = AllocThread();
}
{
SpinMutexLock l(&live_list_mutex_);
live_list_.push_back(t);
} }
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_); t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t); AddThreadStats(t);
@ -142,13 +114,26 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_); ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
} }
void RemoveThreadFromLiveList(Thread *t) {
SpinMutexLock l(&live_list_mutex_);
for (Thread *&t2 : live_list_)
if (t2 == t) {
// To remove t2, copy the last element of the list in t2's position, and
// pop_back(). This works even if t2 is itself the last element.
t2 = live_list_.back();
live_list_.pop_back();
return;
}
CHECK(0 && "thread not found in live list");
}
void ReleaseThread(Thread *t) { void ReleaseThread(Thread *t) {
RemoveThreadStats(t); RemoveThreadStats(t);
t->Destroy(); t->Destroy();
SpinMutexLock l(&list_mutex_);
live_list_.Remove(t);
free_list_.Push(t);
DontNeedThread(t); DontNeedThread(t);
RemoveThreadFromLiveList(t);
SpinMutexLock l(&free_list_mutex_);
free_list_.push_back(t);
} }
Thread *GetThreadByBufferAddress(uptr p) { Thread *GetThreadByBufferAddress(uptr p) {
@ -165,8 +150,8 @@ class HwasanThreadList {
template <class CB> template <class CB>
void VisitAllLiveThreads(CB cb) { void VisitAllLiveThreads(CB cb) {
SpinMutexLock l(&list_mutex_); SpinMutexLock l(&live_list_mutex_);
live_list_.ForEach(cb); for (Thread *t : live_list_) cb(t);
} }
void AddThreadStats(Thread *t) { void AddThreadStats(Thread *t) {
@ -188,6 +173,7 @@ class HwasanThreadList {
private: private:
Thread *AllocThread() { Thread *AllocThread() {
SpinMutexLock l(&free_space_mutex_);
uptr align = ring_buffer_size_ * 2; uptr align = ring_buffer_size_ * 2;
CHECK(IsAligned(free_space_, align)); CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_); Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
@ -196,14 +182,16 @@ class HwasanThreadList {
return t; return t;
} }
SpinMutex free_space_mutex_;
uptr free_space_; uptr free_space_;
uptr free_space_end_; uptr free_space_end_;
uptr ring_buffer_size_; uptr ring_buffer_size_;
uptr thread_alloc_size_; uptr thread_alloc_size_;
ThreadListHead free_list_; SpinMutex free_list_mutex_;
ThreadListHead live_list_; InternalMmapVector<Thread *> free_list_;
SpinMutex list_mutex_; SpinMutex live_list_mutex_;
InternalMmapVector<Thread *> live_list_;
ThreadStats stats_; ThreadStats stats_;
SpinMutex stats_mutex_; SpinMutex stats_mutex_;

View File

@ -43,6 +43,9 @@ void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor // Tell the tools to write their reports to the provided file descriptor
// (casted to void *). // (casted to void *).
void __sanitizer_set_report_fd(void *fd); void __sanitizer_set_report_fd(void *fd);
// Get the current full report file path, if a path was specified by
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
const char *__sanitizer_get_report_path();
// Notify the tools that the sandbox is going to be turned on. The reserved // Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions // parameter will be used in the future to hold a structure with functions

View File

@ -22,6 +22,7 @@ extern "C" {
#endif #endif
typedef uint16_t dfsan_label; typedef uint16_t dfsan_label;
typedef uint32_t dfsan_origin;
/// Stores information associated with a specific label identifier. A label /// Stores information associated with a specific label identifier. A label
/// may be a base label created using dfsan_create_label, with associated /// may be a base label created using dfsan_create_label, with associated
@ -63,6 +64,12 @@ void dfsan_add_label(dfsan_label label, void *addr, size_t size);
/// value. /// value.
dfsan_label dfsan_get_label(long data); dfsan_label dfsan_get_label(long data);
/// Retrieves the immediate origin associated with the given data. The returned
/// origin may point to another origin.
///
/// The type of 'data' is arbitrary.
dfsan_origin dfsan_get_origin(long data);
/// Retrieves the label associated with the data at the given address. /// Retrieves the label associated with the data at the given address.
dfsan_label dfsan_read_label(const void *addr, size_t size); dfsan_label dfsan_read_label(const void *addr, size_t size);
@ -110,6 +117,15 @@ void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2, void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
size_t n, dfsan_label s1_label, size_t n, dfsan_label s1_label,
dfsan_label s2_label, dfsan_label n_label); dfsan_label s2_label, dfsan_label n_label);
/// Prints the origin trace of the label at the address addr to stderr. It also
/// prints description at the beginning of the trace. If origin tracking is not
/// on, or the address is not labeled, it prints nothing.
void dfsan_print_origin_trace(const void *addr, const char *description);
/// Retrieves the very first origin associated with the data at the given
/// address.
dfsan_origin dfsan_get_init_origin(const void *addr);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"

View File

@ -73,6 +73,9 @@ extern "C" {
* accessed through the pointer in x, or -1 if the whole range is good. */ * accessed through the pointer in x, or -1 if the whole range is good. */
intptr_t __hwasan_test_shadow(const volatile void *x, size_t size); intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
/* Sets the callback function to be called during HWASan error reporting. */
void __hwasan_set_error_report_callback(void (*callback)(const char *));
int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size); int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
void * __sanitizer_memalign(size_t alignment, size_t size); void * __sanitizer_memalign(size_t alignment, size_t size);
void * __sanitizer_aligned_alloc(size_t alignment, size_t size); void * __sanitizer_aligned_alloc(size_t alignment, size_t size);

View File

@ -53,6 +53,11 @@ void __memprof_print_accumulated_stats(void);
/// \returns Default options string. /// \returns Default options string.
const char *__memprof_default_options(void); const char *__memprof_default_options(void);
/// Prints the memory profile to the current profile file.
///
/// \returns 0 on success.
int __memprof_profile_dump(void);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif

View File

@ -67,6 +67,12 @@ static const unsigned __tsan_mutex_recursive_lock = 1 << 6;
// the corresponding __tsan_mutex_post_lock annotation. // the corresponding __tsan_mutex_post_lock annotation.
static const unsigned __tsan_mutex_recursive_unlock = 1 << 7; static const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
// Convenient composed constants.
static const unsigned __tsan_mutex_try_read_lock =
__tsan_mutex_read_lock | __tsan_mutex_try_lock;
static const unsigned __tsan_mutex_try_read_lock_failed =
__tsan_mutex_try_read_lock | __tsan_mutex_try_lock_failed;
// Annotate creation of a mutex. // Annotate creation of a mutex.
// Supported flags: mutex creation flags. // Supported flags: mutex creation flags.
void __tsan_mutex_create(void *addr, unsigned flags); void __tsan_mutex_create(void *addr, unsigned flags);
@ -141,7 +147,7 @@ void __tsan_external_write(void *addr, void *caller_pc, void *tag);
// and freed by __tsan_destroy_fiber. // and freed by __tsan_destroy_fiber.
// - TSAN context of current fiber or thread can be obtained // - TSAN context of current fiber or thread can be obtained
// by calling __tsan_get_current_fiber. // by calling __tsan_get_current_fiber.
// - __tsan_switch_to_fiber should be called immediatly before switch // - __tsan_switch_to_fiber should be called immediately before switch
// to fiber, such as call of swapcontext. // to fiber, such as call of swapcontext.
// - Fiber name can be set by __tsan_set_fiber_name. // - Fiber name can be set by __tsan_set_fiber_name.
void *__tsan_get_current_fiber(void); void *__tsan_get_current_fiber(void);
@ -154,6 +160,15 @@ void __tsan_set_fiber_name(void *fiber, const char *name);
// Do not establish a happens-before relation between fibers // Do not establish a happens-before relation between fibers
static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0; static const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
// User-provided callback invoked on TSan initialization.
void __tsan_on_initialize();
// User-provided callback invoked on TSan shutdown.
// `failed` - Nonzero if TSan did detect issues, zero otherwise.
// Return `0` if TSan should exit as if no issues were detected. Return nonzero
// if TSan should exit as if issues were detected.
int __tsan_on_finalize(int failed);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif

View File

@ -30,7 +30,7 @@ __extension__ typedef __int128 __tsan_atomic128;
#endif #endif
// Part of ABI, do not change. // Part of ABI, do not change.
// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic // https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
typedef enum { typedef enum {
__tsan_memory_order_relaxed, __tsan_memory_order_relaxed,
__tsan_memory_order_consume, __tsan_memory_order_consume,

View File

@ -63,8 +63,8 @@ bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
return addr && (func == wrapper); return addr && (func == wrapper);
} }
// Android and Solaris do not have dlvsym // dlvsym is a GNU extension supported by some other platforms.
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS #if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
static void *GetFuncAddr(const char *name, const char *ver) { static void *GetFuncAddr(const char *name, const char *ver) {
return dlvsym(RTLD_NEXT, name, ver); return dlvsym(RTLD_NEXT, name, ver);
} }
@ -75,7 +75,7 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
*ptr_to_real = (uptr)addr; *ptr_to_real = (uptr)addr;
return addr && (func == wrapper); return addr && (func == wrapper);
} }
#endif // !SANITIZER_ANDROID #endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception } // namespace __interception

View File

@ -35,8 +35,8 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
(::__interception::uptr) & (func), \ (::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func)) (::__interception::uptr) & WRAP(func))
// Android and Solaris do not have dlvsym // dlvsym is a GNU extension supported by some other platforms.
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS #if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \ ::__interception::InterceptFunction( \
#func, symver, \ #func, symver, \
@ -46,7 +46,7 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
#else #else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS #endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#endif // INTERCEPTION_LINUX_H #endif // INTERCEPTION_LINUX_H
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||

View File

@ -136,7 +136,7 @@ namespace __interception {
static const int kAddressLength = FIRST_32_SECOND_64(4, 8); static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
static const int kJumpInstructionLength = 5; static const int kJumpInstructionLength = 5;
static const int kShortJumpInstructionLength = 2; static const int kShortJumpInstructionLength = 2;
static const int kIndirectJumpInstructionLength = 6; UNUSED static const int kIndirectJumpInstructionLength = 6;
static const int kBranchLength = static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength); FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength; static const int kDirectBranchLength = kBranchLength + kAddressLength;
@ -165,7 +165,7 @@ static uptr GetMmapGranularity() {
return si.dwAllocationGranularity; return si.dwAllocationGranularity;
} }
static uptr RoundUpTo(uptr size, uptr boundary) { UNUSED static uptr RoundUpTo(uptr size, uptr boundary) {
return (size + boundary - 1) & ~(boundary - 1); return (size + boundary - 1) & ~(boundary - 1);
} }
@ -309,7 +309,7 @@ struct TrampolineMemoryRegion {
uptr max_size; uptr max_size;
}; };
static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig UNUSED static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024; static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion]; static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];

View File

@ -123,14 +123,18 @@ void Deallocate(void *p) {
void *Reallocate(const StackTrace &stack, void *p, uptr new_size, void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) { uptr alignment) {
RegisterDeallocation(p);
if (new_size > max_malloc_size) { if (new_size > max_malloc_size) {
allocator.Deallocate(GetAllocatorCache(), p); ReportAllocationSizeTooBig(new_size, stack);
return ReportAllocationSizeTooBig(new_size, stack); return nullptr;
} }
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); RegisterDeallocation(p);
RegisterAllocation(stack, p, new_size); void *new_p =
return p; allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
if (new_p)
RegisterAllocation(stack, new_p, new_size);
else if (new_size != 0)
RegisterAllocation(stack, p, new_size);
return new_p;
} }
void GetAllocatorCacheRange(uptr *begin, uptr *end) { void GetAllocatorCacheRange(uptr *begin, uptr *end) {
@ -309,6 +313,16 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
return kIgnoreObjectInvalid; return kIgnoreObjectInvalid;
} }
} }
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
// This function can be used to treat memory reachable from `tctx` as live.
// This is useful for threads that have been created but not yet started.
// This is currently a no-op because the LSan `pthread_create()` interceptor
// blocks until the child thread starts which keeps the thread's `arg` pointer
// live.
}
} // namespace __lsan } // namespace __lsan
using namespace __lsan; using namespace __lsan;

View File

@ -50,7 +50,7 @@ struct ChunkMetadata {
}; };
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \ #if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
defined(__arm__) defined(__arm__) || SANITIZER_RISCV64
template <typename AddressSpaceViewTy> template <typename AddressSpaceViewTy>
struct AP32 { struct AP32 {
static const uptr kSpaceBeg = 0; static const uptr kSpaceBeg = 0;

View File

@ -65,8 +65,34 @@ void RegisterLsanFlags(FlagParser *parser, Flags *f) {
if (flags()->log_threads) Report(__VA_ARGS__); \ if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0) } while (0)
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; class LeakSuppressionContext {
static SuppressionContext *suppression_ctx = nullptr; bool parsed = false;
SuppressionContext context;
bool suppressed_stacks_sorted = true;
InternalMmapVector<u32> suppressed_stacks;
Suppression *GetSuppressionForAddr(uptr addr);
void LazyInit();
public:
LeakSuppressionContext(const char *supprression_types[],
int suppression_types_num)
: context(supprression_types, suppression_types_num) {}
Suppression *GetSuppressionForStack(u32 stack_trace_id);
const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
if (!suppressed_stacks_sorted) {
suppressed_stacks_sorted = true;
SortAndDedup(suppressed_stacks);
}
return suppressed_stacks;
}
void PrintMatchedSuppressions();
};
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak"; static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak }; static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] = static const char kStdSuppressions[] =
@ -86,14 +112,20 @@ static const char kStdSuppressions[] =
void InitializeSuppressions() { void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx); CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx = new (suppression_placeholder) suppression_ctx = new (suppression_placeholder)
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
suppression_ctx->Parse(__lsan_default_suppressions());
suppression_ctx->Parse(kStdSuppressions);
} }
static SuppressionContext *GetSuppressionContext() { void LeakSuppressionContext::LazyInit() {
if (!parsed) {
parsed = true;
context.ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
context.Parse(__lsan_default_suppressions());
context.Parse(kStdSuppressions);
}
}
static LeakSuppressionContext *GetSuppressionContext() {
CHECK(suppression_ctx); CHECK(suppression_ctx);
return suppression_ctx; return suppression_ctx;
} }
@ -221,6 +253,27 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
pid_t, void (*cb)(void *, void *, uptr, void *), void *); pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#endif #endif
static void ProcessThreadRegistry(Frontier *frontier) {
InternalMmapVector<uptr> ptrs;
GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
GetAdditionalThreadContextPtrs, &ptrs);
for (uptr i = 0; i < ptrs.size(); ++i) {
void *ptr = reinterpret_cast<void *>(ptrs[i]);
uptr chunk = PointsIntoChunk(ptr);
if (!chunk)
continue;
LsanMetadata m(chunk);
if (!m.allocated())
continue;
// Mark as reachable and add to frontier.
LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
m.set_tag(kReachable);
frontier->push_back(chunk);
}
}
// Scans thread data (stacks and TLS) for heap pointers. // Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads, static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) { Frontier *frontier) {
@ -315,15 +368,15 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
__libc_iterate_dynamic_tls(os_id, cb, frontier); __libc_iterate_dynamic_tls(os_id, cb, frontier);
#else #else
if (dtls && !DTLSInDestruction(dtls)) { if (dtls && !DTLSInDestruction(dtls)) {
for (uptr j = 0; j < dtls->dtv_size; ++j) { ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
uptr dtls_beg = dtls->dtv[j].beg; uptr dtls_beg = dtv.beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size; uptr dtls_end = dtls_beg + dtv.size;
if (dtls_beg < dtls_end) { if (dtls_beg < dtls_end) {
LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end); LOG_THREADS("DTLS %zu at %p-%p.\n", id, dtls_beg, dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS", ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable); kReachable);
} }
} });
} else { } else {
// We are handling a thread with DTLS under destruction. Log about // We are handling a thread with DTLS under destruction. Log about
// this and continue. // this and continue.
@ -332,6 +385,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
#endif #endif
} }
} }
// Add pointers reachable from ThreadContexts
ProcessThreadRegistry(frontier);
} }
#endif // SANITIZER_FUCHSIA #endif // SANITIZER_FUCHSIA
@ -390,6 +446,24 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
} }
} }
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
CHECK(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (!m.allocated() || m.tag() == kIgnored)
return;
const InternalMmapVector<u32> &suppressed =
*static_cast<const InternalMmapVector<u32> *>(arg);
uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
return;
LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", chunk,
chunk + m.requested_size(), m.requested_size());
m.set_tag(kIgnored);
}
// ForEachChunk callback. If chunk is marked as ignored, adds its address to // ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier. // frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) { static void CollectIgnoredCb(uptr chunk, void *arg) {
@ -473,6 +547,12 @@ void ProcessPC(Frontier *frontier) {
// Sets the appropriate tag on each chunk. // Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads, static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) { Frontier *frontier) {
const InternalMmapVector<u32> &suppressed_stacks =
GetSuppressionContext()->GetSortedSuppressedStacks();
if (!suppressed_stacks.empty()) {
ForEachChunk(IgnoredSuppressedCb,
const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
}
ForEachChunk(CollectIgnoredCb, frontier); ForEachChunk(CollectIgnoredCb, frontier);
ProcessGlobalRegions(frontier); ProcessGlobalRegions(frontier);
ProcessThreads(suspended_threads, frontier); ProcessThreads(suspended_threads, frontier);
@ -532,18 +612,20 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
} }
} }
static void PrintMatchedSuppressions() { void LeakSuppressionContext::PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched; InternalMmapVector<Suppression *> matched;
GetSuppressionContext()->GetMatched(&matched); context.GetMatched(&matched);
if (!matched.size()) if (!matched.size())
return; return;
const char *line = "-----------------------------------------------------"; const char *line = "-----------------------------------------------------";
Printf("%s\n", line); Printf("%s\n", line);
Printf("Suppressions used:\n"); Printf("Suppressions used:\n");
Printf(" count bytes template\n"); Printf(" count bytes template\n");
for (uptr i = 0; i < matched.size(); i++) for (uptr i = 0; i < matched.size(); i++) {
Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed( Printf("%7zu %10zu %s\n",
&matched[i]->hit_count)), matched[i]->weight, matched[i]->templ); static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
matched[i]->weight, matched[i]->templ);
}
Printf("%s\n\n", line); Printf("%s\n\n", line);
} }
@ -551,8 +633,7 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
const InternalMmapVector<tid_t> &suspended_threads = const InternalMmapVector<tid_t> &suspended_threads =
*(const InternalMmapVector<tid_t> *)arg; *(const InternalMmapVector<tid_t> *)arg;
if (tctx->status == ThreadStatusRunning) { if (tctx->status == ThreadStatusRunning) {
uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(), uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
tctx->os_id, CompareLess<int>());
if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id) if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
Report("Running thread %d was not suspended. False leaks are possible.\n", Report("Running thread %d was not suspended. False leaks are possible.\n",
tctx->os_id); tctx->os_id);
@ -595,43 +676,68 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
param->success = true; param->success = true;
} }
static bool CheckForLeaks() { static bool PrintResults(LeakReport &report) {
if (&__lsan_is_turned_off && __lsan_is_turned_off()) uptr unsuppressed_count = report.UnsuppressedLeakCount();
return false; if (unsuppressed_count) {
EnsureMainThreadIDIsCorrect();
CheckForLeaksParam param;
LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
Report(
"HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();
uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
if (unsuppressed_count > 0) {
Decorator d; Decorator d;
Printf("\n" Printf(
"=================================================================" "\n"
"\n"); "================================================================="
"\n");
Printf("%s", d.Error()); Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n"); Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.Default()); Printf("%s", d.Default());
param.leak_report.ReportTopLeaks(flags()->max_leaks); report.ReportTopLeaks(flags()->max_leaks);
} }
if (common_flags()->print_suppressions) if (common_flags()->print_suppressions)
PrintMatchedSuppressions(); GetSuppressionContext()->PrintMatchedSuppressions();
if (unsuppressed_count > 0) { if (unsuppressed_count > 0) {
param.leak_report.PrintSummary(); report.PrintSummary();
return true; return true;
} }
return false; return false;
} }
static bool CheckForLeaks() {
if (&__lsan_is_turned_off && __lsan_is_turned_off())
return false;
// Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
// suppressions. However if a stack id was previously suppressed, it should be
// suppressed in future checks as well.
for (int i = 0;; ++i) {
EnsureMainThreadIDIsCorrect();
CheckForLeaksParam param;
LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
Report(
"HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
"etc)\n");
Die();
}
// No new suppressions stacks, so rerun will not help and we can report.
if (!param.leak_report.ApplySuppressions())
return PrintResults(param.leak_report);
// No indirect leaks to report, so we are done here.
if (!param.leak_report.IndirectUnsuppressedLeakCount())
return PrintResults(param.leak_report);
if (i >= 8) {
Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
return PrintResults(param.leak_report);
}
// We found a new previously unseen suppressed call stack. Rerun to make
// sure it does not hold indirect leaks.
VReport(1, "Rerun with %zu suppressed stacks.",
GetSuppressionContext()->GetSortedSuppressedStacks().size());
}
}
static bool has_reported_leaks = false; static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; } bool HasReportedLeaks() { return has_reported_leaks; }
@ -652,21 +758,20 @@ static int DoRecoverableLeakCheck() {
void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); } void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
static Suppression *GetSuppressionForAddr(uptr addr) { Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr; Suppression *s = nullptr;
// Suppress by module name. // Suppress by module name.
SuppressionContext *suppressions = GetSuppressionContext();
if (const char *module_name = if (const char *module_name =
Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
if (suppressions->Match(module_name, kSuppressionLeak, &s)) if (context.Match(module_name, kSuppressionLeak, &s))
return s; return s;
// Suppress by file or function name. // Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) { for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { context.Match(cur->info.file, kSuppressionLeak, &s)) {
break; break;
} }
} }
@ -674,12 +779,18 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
return s; return s;
} }
static Suppression *GetSuppressionForStack(u32 stack_trace_id) { Suppression *LeakSuppressionContext::GetSuppressionForStack(
u32 stack_trace_id) {
LazyInit();
StackTrace stack = StackDepotGet(stack_trace_id); StackTrace stack = StackDepotGet(stack_trace_id);
for (uptr i = 0; i < stack.size; i++) { for (uptr i = 0; i < stack.size; i++) {
Suppression *s = GetSuppressionForAddr( Suppression *s = GetSuppressionForAddr(
StackTrace::GetPreviousInstructionPc(stack.trace[i])); StackTrace::GetPreviousInstructionPc(stack.trace[i]));
if (s) return s; if (s) {
suppressed_stacks_sorted = false;
suppressed_stacks.push_back(stack_trace_id);
return s;
}
} }
return nullptr; return nullptr;
} }
@ -784,22 +895,27 @@ void LeakReport::PrintSummary() {
bytes += leaks_[i].total_size; bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count; allocations += leaks_[i].hit_count;
} }
InternalScopedString summary(kMaxSummaryLength); InternalScopedString summary;
summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
allocations); allocations);
ReportErrorSummary(summary.data()); ReportErrorSummary(summary.data());
} }
void LeakReport::ApplySuppressions() { uptr LeakReport::ApplySuppressions() {
LeakSuppressionContext *suppressions = GetSuppressionContext();
uptr new_suppressions = false;
for (uptr i = 0; i < leaks_.size(); i++) { for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); Suppression *s =
suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) { if (s) {
s->weight += leaks_[i].total_size; s->weight += leaks_[i].total_size;
atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
leaks_[i].hit_count); leaks_[i].hit_count);
leaks_[i].is_suppressed = true; leaks_[i].is_suppressed = true;
++new_suppressions;
} }
} }
return new_suppressions;
} }
uptr LeakReport::UnsuppressedLeakCount() { uptr LeakReport::UnsuppressedLeakCount() {
@ -809,6 +925,14 @@ uptr LeakReport::UnsuppressedLeakCount() {
return result; return result;
} }
uptr LeakReport::IndirectUnsuppressedLeakCount() {
uptr result = 0;
for (uptr i = 0; i < leaks_.size(); i++)
if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
result++;
return result;
}
} // namespace __lsan } // namespace __lsan
#else // CAN_SANITIZE_LEAKS #else // CAN_SANITIZE_LEAKS
namespace __lsan { namespace __lsan {

View File

@ -41,6 +41,8 @@
#define CAN_SANITIZE_LEAKS 1 #define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX #elif defined(__arm__) && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1 #define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA #elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1 #define CAN_SANITIZE_LEAKS 1
#else #else
@ -50,6 +52,7 @@
namespace __sanitizer { namespace __sanitizer {
class FlagParser; class FlagParser;
class ThreadRegistry; class ThreadRegistry;
class ThreadContextBase;
struct DTLS; struct DTLS;
} }
@ -63,8 +66,6 @@ enum ChunkTag {
kIgnored = 3 kIgnored = 3
}; };
const u32 kInvalidTid = (u32) -1;
struct Flags { struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name; #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc" #include "lsan_flags.inc"
@ -103,8 +104,9 @@ class LeakReport {
ChunkTag tag); ChunkTag tag);
void ReportTopLeaks(uptr max_leaks); void ReportTopLeaks(uptr max_leaks);
void PrintSummary(); void PrintSummary();
void ApplySuppressions(); uptr ApplySuppressions();
uptr UnsuppressedLeakCount(); uptr UnsuppressedLeakCount();
uptr IndirectUnsuppressedLeakCount();
private: private:
void PrintReportForLeak(uptr index); void PrintReportForLeak(uptr index);
@ -141,6 +143,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region, void ScanRootRegion(Frontier *frontier, RootRegion const &region,
uptr region_begin, uptr region_end, bool is_readable); uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg); void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the // Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks. // allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback, void LockStuffAndStopTheWorld(StopTheWorldCallback callback,

View File

@ -107,9 +107,7 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
auto params = static_cast<const Params *>(data); auto params = static_cast<const Params *>(data);
uptr begin = reinterpret_cast<uptr>(chunk); uptr begin = reinterpret_cast<uptr>(chunk);
uptr end = begin + size; uptr end = begin + size;
auto i = __sanitizer::InternalLowerBound(params->allocator_caches, 0, auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
params->allocator_caches.size(),
begin, CompareLess<uptr>());
if (i < params->allocator_caches.size() && if (i < params->allocator_caches.size() &&
params->allocator_caches[i] >= begin && params->allocator_caches[i] >= begin &&
end - params->allocator_caches[i] <= sizeof(AllocatorCache)) { end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {

View File

@ -23,7 +23,7 @@
namespace __lsan { namespace __lsan {
class ThreadContext : public ThreadContextLsanBase { class ThreadContext final : public ThreadContextLsanBase {
public: public:
explicit ThreadContext(int tid); explicit ThreadContext(int tid);
void OnCreated(void *arg) override; void OnCreated(void *arg) override;

View File

@ -460,7 +460,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
if (res == 0) { if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
IsStateDetached(detached)); IsStateDetached(detached));
CHECK_NE(tid, 0); CHECK_NE(tid, kMainTid);
atomic_store(&p.tid, tid, memory_order_release); atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0) while (atomic_load(&p.tid, memory_order_acquire) != 0)
internal_sched_yield(); internal_sched_yield();

View File

@ -48,7 +48,7 @@ void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
OnStartedArgs args; OnStartedArgs args;
uptr stack_size = 0; uptr stack_size = 0;
uptr tls_size = 0; uptr tls_size = 0;
GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size, GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size); &args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size; args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size; args.tls_end = args.tls_begin + tls_size;
@ -75,8 +75,8 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
} }
void InitializeMainThread() { void InitializeMainThread() {
u32 tid = ThreadCreate(0, 0, true); u32 tid = ThreadCreate(kMainTid, 0, true);
CHECK_EQ(tid, 0); CHECK_EQ(tid, kMainTid);
ThreadStart(tid, GetTid()); ThreadStart(tid, GetTid());
} }

View File

@ -94,7 +94,7 @@ void ThreadJoin(u32 tid) {
} }
void EnsureMainThreadIDIsCorrect() { void EnsureMainThreadIDIsCorrect() {
if (GetCurrentThread() == 0) if (GetCurrentThread() == kMainTid)
CurrentThreadContext()->os_id = GetTid(); CurrentThreadContext()->os_id = GetTid();
} }

View File

@ -35,9 +35,9 @@ class CombinedAllocator {
secondary_.InitLinkerInitialized(); secondary_.InitLinkerInitialized();
} }
void Init(s32 release_to_os_interval_ms) { void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
stats_.Init(); stats_.Init();
primary_.Init(release_to_os_interval_ms); primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.Init(); secondary_.Init();
} }

View File

@ -119,7 +119,8 @@ class SizeClassAllocator32 {
typedef SizeClassAllocator32<Params> ThisT; typedef SizeClassAllocator32<Params> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache; typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
void Init(s32 release_to_os_interval_ms) { void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
CHECK(!heap_start);
possible_regions.Init(); possible_regions.Init();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
} }

View File

@ -19,7 +19,7 @@ template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
// The template parameter Params is a class containing the actual parameters. // The template parameter Params is a class containing the actual parameters.
// //
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg. // Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap. // If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address). // Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two. // kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it // At the beginning the entire space is mprotect-ed, then small parts of it
@ -69,25 +69,45 @@ class SizeClassAllocator64 {
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale); return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
} }
void Init(s32 release_to_os_interval_ms) { // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
// at heap_start and places the heap there. This mode requires kSpaceBeg ==
// ~(uptr)0.
void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
if (kUsingConstantSpaceBeg) { PremappedHeap = heap_start != 0;
CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize)); if (PremappedHeap) {
CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize, CHECK(!kUsingConstantSpaceBeg);
PrimaryAllocatorName, kSpaceBeg)); NonConstSpaceBeg = heap_start;
uptr RegionInfoSize = AdditionalSize();
RegionInfoSpace =
address_range.Init(RegionInfoSize, PrimaryAllocatorName);
CHECK_NE(RegionInfoSpace, ~(uptr)0);
CHECK_EQ(RegionInfoSpace,
address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
"SizeClassAllocator: region info"));
MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
} else { } else {
// Combined allocator expects that an 2^N allocation is always aligned to if (kUsingConstantSpaceBeg) {
// 2^N. For this to work, the start of the space needs to be aligned as CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
// high as the largest size class (which also needs to be a power of 2). CHECK_EQ(kSpaceBeg,
NonConstSpaceBeg = address_range.InitAligned( address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName); kSpaceBeg));
CHECK_NE(NonConstSpaceBeg, ~(uptr)0); } else {
// Combined allocator expects that an 2^N allocation is always aligned
// to 2^N. For this to work, the start of the space needs to be aligned
// as high as the largest size class (which also needs to be a power of
// 2).
NonConstSpaceBeg = address_range.InitAligned(
TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
}
RegionInfoSpace = SpaceEnd();
MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
"SizeClassAllocator: region info");
} }
SetReleaseToOSIntervalMs(release_to_os_interval_ms); SetReleaseToOSIntervalMs(release_to_os_interval_ms);
MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
"SizeClassAllocator: region info");
// Check that the RegionInfo array is aligned on the CacheLine size. // Check that the RegionInfo array is aligned on the CacheLine size.
DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0); DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
} }
s32 ReleaseToOSIntervalMs() const { s32 ReleaseToOSIntervalMs() const {
@ -144,6 +164,17 @@ class SizeClassAllocator64 {
CompactPtrT *free_array = GetFreeArray(region_beg); CompactPtrT *free_array = GetFreeArray(region_beg);
BlockingMutexLock l(&region->mutex); BlockingMutexLock l(&region->mutex);
#if SANITIZER_WINDOWS
/* On Windows unmapping of memory during __sanitizer_purge_allocator is
explicit and immediate, so unmapped regions must be explicitly mapped back
in when they are accessed again. */
if (region->rtoi.last_released_bytes > 0) {
MmapFixedOrDie(region_beg, region->mapped_user,
"SizeClassAllocator: region data");
region->rtoi.n_freed_at_last_release = 0;
region->rtoi.last_released_bytes = 0;
}
#endif
if (UNLIKELY(region->num_freed_chunks < n_chunks)) { if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
if (UNLIKELY(!PopulateFreeArray(stat, class_id, region, if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
n_chunks - region->num_freed_chunks))) n_chunks - region->num_freed_chunks)))
@ -360,8 +391,7 @@ class SizeClassAllocator64 {
} }
~PackedCounterArray() { ~PackedCounterArray() {
if (buffer) { if (buffer) {
memory_mapper->UnmapPackedCounterArrayBuffer( memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
reinterpret_cast<uptr>(buffer), buffer_size);
} }
} }
@ -586,6 +616,11 @@ class SizeClassAllocator64 {
atomic_sint32_t release_to_os_interval_ms_; atomic_sint32_t release_to_os_interval_ms_;
uptr RegionInfoSpace;
// True if the user has already mapped the entire heap R/W.
bool PremappedHeap;
struct Stats { struct Stats {
uptr n_allocated; uptr n_allocated;
uptr n_freed; uptr n_freed;
@ -615,7 +650,7 @@ class SizeClassAllocator64 {
RegionInfo *GetRegionInfo(uptr class_id) const { RegionInfo *GetRegionInfo(uptr class_id) const {
DCHECK_LT(class_id, kNumClasses); DCHECK_LT(class_id, kNumClasses);
RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd()); RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
return &regions[class_id]; return &regions[class_id];
} }
@ -640,6 +675,9 @@ class SizeClassAllocator64 {
} }
bool MapWithCallback(uptr beg, uptr size, const char *name) { bool MapWithCallback(uptr beg, uptr size, const char *name) {
if (PremappedHeap)
return beg >= NonConstSpaceBeg &&
beg + size <= NonConstSpaceBeg + kSpaceSize;
uptr mapped = address_range.Map(beg, size, name); uptr mapped = address_range.Map(beg, size, name);
if (UNLIKELY(!mapped)) if (UNLIKELY(!mapped))
return false; return false;
@ -649,11 +687,18 @@ class SizeClassAllocator64 {
} }
void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) { void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
if (PremappedHeap) {
CHECK_GE(beg, NonConstSpaceBeg);
CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
return;
}
CHECK_EQ(beg, address_range.MapOrDie(beg, size, name)); CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
MapUnmapCallback().OnMap(beg, size); MapUnmapCallback().OnMap(beg, size);
} }
void UnmapWithCallbackOrDie(uptr beg, uptr size) { void UnmapWithCallbackOrDie(uptr beg, uptr size) {
if (PremappedHeap)
return;
MapUnmapCallback().OnUnmap(beg, size); MapUnmapCallback().OnUnmap(beg, size);
address_range.Unmap(beg, size); address_range.Unmap(beg, size);
} }
@ -792,17 +837,16 @@ class SizeClassAllocator64 {
return released_bytes; return released_bytes;
} }
uptr MapPackedCounterArrayBuffer(uptr buffer_size) { void *MapPackedCounterArrayBuffer(uptr buffer_size) {
// TODO(alekseyshl): The idea to explore is to check if we have enough // TODO(alekseyshl): The idea to explore is to check if we have enough
// space between num_freed_chunks*sizeof(CompactPtrT) and // space between num_freed_chunks*sizeof(CompactPtrT) and
// mapped_free_array to fit buffer_size bytes and use that space instead // mapped_free_array to fit buffer_size bytes and use that space instead
// of mapping a temporary one. // of mapping a temporary one.
return reinterpret_cast<uptr>( return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
} }
void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) { void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size); UnmapOrDie(buffer, buffer_size);
} }
// Releases [from, to) range of pages back to OS. // Releases [from, to) range of pages back to OS.
@ -823,6 +867,9 @@ class SizeClassAllocator64 {
// Attempts to release RAM occupied by freed chunks back to OS. The region is // Attempts to release RAM occupied by freed chunks back to OS. The region is
// expected to be locked. // expected to be locked.
//
// TODO(morehouse): Support a callback on memory release so HWASan can release
// aliases as well.
void MaybeReleaseToOS(uptr class_id, bool force) { void MaybeReleaseToOS(uptr class_id, bool force) {
RegionInfo *region = GetRegionInfo(class_id); RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id); const uptr chunk_size = ClassIdToSize(class_id);

View File

@ -24,7 +24,7 @@
// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog // E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
// look like 0b1xx0..0, where x is either 0 or 1. // look like 0b1xx0..0, where x is either 0 or 1.
// //
// Example: kNumBits=3, kMidSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17: // Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
// //
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16). // Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4). // Next 4 classes: 256 + i * 64 (i = 1 to 4).

View File

@ -41,7 +41,7 @@ inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val, atomic_uint64_t::Type val,
memory_order mo) { memory_order mo) {
DCHECK(mo & DCHECK(mo &
(memory_order_relaxed | memory_order_releasae | memory_order_seq_cst)); (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr))); DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type ret; atomic_uint64_t::Type ret;
@ -67,7 +67,7 @@ inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type xchg, atomic_uint64_t::Type xchg,
memory_order mo) { memory_order mo) {
DCHECK(mo & DCHECK(mo &
(memory_order_relaxed | memory_order_releasae | memory_order_seq_cst)); (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr))); DCHECK(!((uptr)ptr % sizeof(*ptr)));
typedef atomic_uint64_t::Type Type; typedef atomic_uint64_t::Type Type;
@ -90,7 +90,7 @@ template <>
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr, inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) { memory_order mo) {
DCHECK(mo & DCHECK(mo &
(memory_order_relaxed | memory_order_releasae | memory_order_seq_cst)); (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr))); DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type zero = 0; atomic_uint64_t::Type zero = 0;
@ -103,7 +103,7 @@ template <>
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v, inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) { memory_order mo) {
DCHECK(mo & DCHECK(mo &
(memory_order_relaxed | memory_order_releasae | memory_order_seq_cst)); (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr))); DCHECK(!((uptr)ptr % sizeof(*ptr)));
__spin_lock(&lock.lock); __spin_lock(&lock.lock);

View File

@ -0,0 +1,108 @@
//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#include "sanitizer_chained_origin_depot.h"
namespace __sanitizer {
bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
u32 hash, const args_type &args) const {
return here_id == args.here_id && prev_id == args.prev_id;
}
uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
const args_type &args) {
return sizeof(ChainedOriginDepotNode);
}
/* This is murmur2 hash for the 64->32 bit case.
   It does not behave all that well because the keys have a very biased
   distribution (I've seen 7-element buckets with the table only 14% full).

   here_id is built of
   * (1 bits) Reserved, zero.
   * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
   * (23 bits) Sequential number (each part has each own sequence).

   prev_id has either the same distribution as here_id (but with 3:8:21)
   split, or one of two reserved values (-1) or (-2). Either case can
   dominate depending on the workload.
*/
u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
  const u32 m = 0x5bd1e995;
  const u32 r = 24;
  u32 h = 0x9747b28c;  // seed
  // Standard murmur2 accumulation: mix each half of the key into h in turn.
  const u32 keys[2] = {args.here_id, args.prev_id};
  for (u32 k : keys) {
    k *= m;
    k ^= k >> r;
    k *= m;
    h *= m;
    h ^= k;
  }
  // Final avalanche.
  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}
// Part of the node interface used by the underlying depot to reject
// malformed args. Any (here_id, prev_id) pair is storable, so there is
// nothing to reject here.
bool ChainedOriginDepot::ChainedOriginDepotNode::is_valid(
    const args_type &args) {
  return true;
}
// Copies the key pair into this freshly allocated node. |other_hash| is part
// of the node-interface signature but is not used by this implementation.
void ChainedOriginDepot::ChainedOriginDepotNode::store(const args_type &args,
                                                       u32 other_hash) {
  prev_id = args.prev_id;
  here_id = args.here_id;
}
// Reconstructs the key pair this node was stored under.
ChainedOriginDepot::ChainedOriginDepotNode::args_type
ChainedOriginDepot::ChainedOriginDepotNode::load() const {
  return {here_id, prev_id};
}
// Returns a non-owning Handle wrapping this node.
ChainedOriginDepot::ChainedOriginDepotNode::Handle
ChainedOriginDepot::ChainedOriginDepotNode::get_handle() {
  return Handle(this);
}
// The depot member is default-constructed; nothing else to initialize.
ChainedOriginDepot::ChainedOriginDepot() {}
// Forwards to the underlying depot's statistics.
StackDepotStats *ChainedOriginDepot::GetStats() { return depot.GetStats(); }
// Stores the chain link (here_id, prev_id) in the depot.
// Returns true iff a new node was inserted. *new_id receives the ID of the
// new or pre-existing node, or 0 if the depot handed back an invalid handle.
bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
  ChainedOriginDepotDesc desc = {here_id, prev_id};
  // Initialize defensively instead of leaving the flag indeterminate: the
  // return value must not depend on depot.Put writing through the pointer on
  // every path.
  bool inserted = false;
  ChainedOriginDepotNode::Handle h = depot.Put(desc, &inserted);
  *new_id = h.valid() ? h.id() : 0;
  return inserted;
}
// Looks up chain |id|: returns its StackDepot stack ID and writes the
// previous chain's ID into *other.
u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
  const ChainedOriginDepotDesc desc = depot.Get(id);
  const u32 stack_id = desc.here_id;
  *other = desc.prev_id;
  return stack_id;
}
// Locks the underlying depot; pairs with UnlockAll.
void ChainedOriginDepot::LockAll() { depot.LockAll(); }
// Unlocks the underlying depot; pairs with LockAll.
void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
} // namespace __sanitizer

View File

@ -0,0 +1,88 @@
//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H
#define SANITIZER_CHAINED_ORIGIN_DEPOT_H
#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
// Maps (stack ID, previous chain ID) pairs to chain IDs, built on top of
// StackDepotBase. Each stored node links one StackDepot stack (here_id) to
// the chain element that preceded it (prev_id).
class ChainedOriginDepot {
 public:
  ChainedOriginDepot();

  // Gets the statistic of the origin chain storage.
  StackDepotStats *GetStats();

  // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
  // If successful, returns true and the new chain id new_id.
  // If the same element already exists, returns false and sets new_id to the
  // existing ID.
  bool Put(u32 here_id, u32 prev_id, u32 *new_id);

  // Retrieves the stored StackDepot ID for the given origin ID.
  u32 Get(u32 id, u32 *other);

  // Lock/unlock the underlying depot, e.g. around whole-table traversals.
  void LockAll();
  void UnlockAll();

 private:
  // Depot key: the (here_id, prev_id) pair uniquely identifies a chain link.
  struct ChainedOriginDepotDesc {
    u32 here_id;
    u32 prev_id;
  };

  // Hash-table node stored by the depot; implements the interface the depot
  // expects (eq/storage_size/hash/is_valid/store/load/get_handle).
  struct ChainedOriginDepotNode {
    ChainedOriginDepotNode *link;
    u32 id;
    u32 here_id;
    u32 prev_id;

    typedef ChainedOriginDepotDesc args_type;

    bool eq(u32 hash, const args_type &args) const;

    static uptr storage_size(const args_type &args);

    static u32 hash(const args_type &args);

    static bool is_valid(const args_type &args);

    void store(const args_type &args, u32 other_hash);

    args_type load() const;

    // Non-owning view of a node returned by depot lookups.
    struct Handle {
      ChainedOriginDepotNode *node_;
      Handle() : node_(nullptr) {}
      explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
      bool valid() { return node_; }
      u32 id() { return node_->id; }
      // NOTE(review): these return int while the underlying fields are u32,
      // so values are implicitly converted — confirm callers expect that.
      int here_id() { return node_->here_id; }
      int prev_id() { return node_->prev_id; }
    };

    Handle get_handle();

    typedef Handle handle_type;
  };

  StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;

  // Non-copyable: the depot owns process-global storage.
  ChainedOriginDepot(const ChainedOriginDepot &) = delete;
  void operator=(const ChainedOriginDepot &) = delete;
};
} // namespace __sanitizer
#endif // SANITIZER_CHAINED_ORIGIN_DEPOT_H

View File

@ -87,7 +87,7 @@ const char *StripModuleName(const char *module) {
void ReportErrorSummary(const char *error_message, const char *alt_tool_name) { void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary) if (!common_flags()->print_summary)
return; return;
InternalScopedString buff(kMaxSummaryLength); InternalScopedString buff;
buff.append("SUMMARY: %s: %s", buff.append("SUMMARY: %s: %s",
alt_tool_name ? alt_tool_name : SanitizerToolName, error_message); alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data()); __sanitizer_report_error_summary(buff.data());
@ -274,6 +274,14 @@ uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
return name_len; return name_len;
} }
// Writes the directory portion of the current binary's path into |buf| and
// returns its length. The buffer is first filled with the full cached binary
// name, then NUL-terminated at the point where StripModuleName says the
// basename begins.
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len) {
  ReadBinaryNameCached(buf, buf_len);
  const char *basename = StripModuleName(buf);
  const uptr dir_len = basename - buf;
  buf[dir_len] = '\0';
  return dir_len;
}
#if !SANITIZER_GO #if !SANITIZER_GO
void PrintCmdline() { void PrintCmdline() {
char **argv = GetArgv(); char **argv = GetArgv();

View File

@ -44,7 +44,7 @@ const uptr kMaxPathLength = 4096;
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
static const uptr kErrorMessageBufferSize = 1 << 16; const uptr kErrorMessageBufferSize = 1 << 16;
// Denotes fake PC values that come from JIT/JAVA/etc. // Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called. // For such PC values __tsan_symbolize_external_ex() will be called.
@ -135,6 +135,15 @@ void UnmapFromTo(uptr from, uptr to);
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end); uptr min_shadow_base_alignment, uptr &high_mem_end);
// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
uptr num_aliases, uptr ring_buffer_size);
// Reserve memory range [beg, end]. If madvise_shadow is true then apply // Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options. // madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name, void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
@ -248,6 +257,7 @@ const char *StripModuleName(const char *module);
// OS // OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len); uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len); uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len); uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName(); const char *GetProcessName();
void UpdateProcessName(); void UpdateProcessName();
@ -294,8 +304,8 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err, const char *mmap_type, error_t err,
bool raw_report = false); bool raw_report = false);
// Specific tools may override behavior of "Die" and "CheckFailed" functions // Specific tools may override behavior of "Die" function to do tool-specific
// to do tool-specific job. // job.
typedef void (*DieCallbackType)(void); typedef void (*DieCallbackType)(void);
// It's possible to add several callbacks that would be run when "Die" is // It's possible to add several callbacks that would be run when "Die" is
@ -307,9 +317,7 @@ bool RemoveDieCallback(DieCallbackType callback);
void SetUserDieCallback(DieCallbackType callback); void SetUserDieCallback(DieCallbackType callback);
typedef void (*CheckFailedCallbackType)(const char *, int, const char *, void SetCheckUnwindCallback(void (*callback)());
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Callback will be called if soft_rss_limit_mb is given and the limit is // Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit // exceeded (exceeded==true) or if rss went down below the limit
@ -343,8 +351,6 @@ void ReportDeadlySignal(const SignalContext &sig, u32 tid,
void SetAlternateSignalStack(); void SetAlternateSignalStack();
void UnsetAlternateSignalStack(); void UnsetAlternateSignalStack();
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string: // Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message // SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary. // and pass it to __sanitizer_report_error_summary.
@ -441,8 +447,14 @@ inline uptr Log2(uptr x) {
// Don't use std::min, std::max or std::swap, to minimize dependency // Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++. // on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; } template <class T>
template<class T> T Max(T a, T b) { return a > b ? a : b; } constexpr T Min(T a, T b) {
return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
return a > b ? a : b;
}
template<class T> void Swap(T& a, T& b) { template<class T> void Swap(T& a, T& b) {
T tmp = a; T tmp = a;
a = b; a = b;
@ -467,6 +479,7 @@ inline int ToLower(int c) {
template<typename T> template<typename T>
class InternalMmapVectorNoCtor { class InternalMmapVectorNoCtor {
public: public:
using value_type = T;
void Initialize(uptr initial_capacity) { void Initialize(uptr initial_capacity) {
capacity_bytes_ = 0; capacity_bytes_ = 0;
size_ = 0; size_ = 0;
@ -590,21 +603,21 @@ class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
InternalMmapVector &operator=(InternalMmapVector &&) = delete; InternalMmapVector &operator=(InternalMmapVector &&) = delete;
}; };
class InternalScopedString : public InternalMmapVector<char> { class InternalScopedString {
public: public:
explicit InternalScopedString(uptr max_length) InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }
: InternalMmapVector<char>(max_length), length_(0) {
(*this)[0] = '\0'; uptr length() const { return buffer_.size() - 1; }
}
uptr length() { return length_; }
void clear() { void clear() {
(*this)[0] = '\0'; buffer_.resize(1);
length_ = 0; buffer_[0] = '\0';
} }
void append(const char *format, ...); void append(const char *format, ...);
const char *data() const { return buffer_.data(); }
char *data() { return buffer_.data(); }
private: private:
uptr length_; InternalMmapVector<char> buffer_;
}; };
template <class T> template <class T>
@ -651,9 +664,13 @@ void Sort(T *v, uptr size, Compare comp = {}) {
// Works like std::lower_bound: finds the first element that is not less // Works like std::lower_bound: finds the first element that is not less
// than the val. // than the val.
template <class Container, class Value, class Compare> template <class Container,
uptr InternalLowerBound(const Container &v, uptr first, uptr last, class Compare = CompareLess<typename Container::value_type>>
const Value &val, Compare comp) { uptr InternalLowerBound(const Container &v,
const typename Container::value_type &val,
Compare comp = {}) {
uptr first = 0;
uptr last = v.size();
while (last > first) { while (last > first) {
uptr mid = (first + last) / 2; uptr mid = (first + last) / 2;
if (comp(v[mid], val)) if (comp(v[mid], val))
@ -677,6 +694,27 @@ enum ModuleArch {
kModuleArchRISCV64 kModuleArchRISCV64
}; };
// Sorts and removes duplicates from the container.
template <class Container,
class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
Sort(v.data(), v.size(), comp);
uptr size = v.size();
if (size < 2)
return;
uptr last = 0;
for (uptr i = 1; i < size; ++i) {
if (comp(v[last], v[i])) {
++last;
if (last != i)
v[last] = v[i];
} else {
CHECK(!comp(v[i], v[last]));
}
}
v.resize(last + 1);
}
// Opens the file 'file_name" and reads up to 'max_len' bytes. // Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'. // The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read. // Returns true if file was successfully opened and read.

View File

@ -239,7 +239,7 @@ extern const short *_tolower_tab_;
COMMON_INTERCEPT_FUNCTION(fn) COMMON_INTERCEPT_FUNCTION(fn)
#endif #endif
#ifdef __GLIBC__ #if SANITIZER_GLIBC
// If we could not find the versioned symbol, fall back to an unversioned // If we could not find the versioned symbol, fall back to an unversioned
// lookup. This is needed to work around a GLibc bug that causes dlsym // lookup. This is needed to work around a GLibc bug that causes dlsym
// with RTLD_NEXT to return the oldest versioned symbol. // with RTLD_NEXT to return the oldest versioned symbol.
@ -2195,6 +2195,7 @@ INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
} }
return res; return res;
} }
#if SANITIZER_GLIBC
namespace __sanitizer { namespace __sanitizer {
extern "C" { extern "C" {
int real_clock_gettime(u32 clk_id, void *tp) { int real_clock_gettime(u32 clk_id, void *tp) {
@ -2204,6 +2205,7 @@ int real_clock_gettime(u32 clk_id, void *tp) {
} }
} // extern "C" } // extern "C"
} // namespace __sanitizer } // namespace __sanitizer
#endif
INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) { INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
void *ctx; void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp); COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);
@ -3355,7 +3357,7 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
char *res = REAL(setlocale)(category, locale); char *res = REAL(setlocale)(category, locale);
if (res) { if (res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
unpoison_ctype_arrays(ctx); unpoison_ctype_arrays(ctx);
} }
return res; return res;
@ -4030,7 +4032,7 @@ INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
// FIXME: under ASan the call below may write to freed memory and corrupt // FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See // its metadata. See
// https://github.com/google/sanitizers/issues/321. // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigwait)(set, sig); int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig)); if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res; return res;
} }
@ -4047,7 +4049,7 @@ INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
// FIXME: under ASan the call below may write to freed memory and corrupt // FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See // its metadata. See
// https://github.com/google/sanitizers/issues/321. // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigwaitinfo)(set, info); int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz); if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res; return res;
} }
@ -4066,7 +4068,7 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
// FIXME: under ASan the call below may write to freed memory and corrupt // FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See // its metadata. See
// https://github.com/google/sanitizers/issues/321. // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigtimedwait)(set, info, timeout); int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz); if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res; return res;
} }
@ -5995,6 +5997,9 @@ void unpoison_file(__sanitizer_FILE *fp) {
if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end) if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base, COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
fp->_IO_read_end - fp->_IO_read_base); fp->_IO_read_end - fp->_IO_read_base);
if (fp->_IO_write_base && fp->_IO_write_base < fp->_IO_write_end)
COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_write_base,
fp->_IO_write_end - fp->_IO_write_base);
#endif #endif
#endif // SANITIZER_HAS_STRUCT_FILE #endif // SANITIZER_HAS_STRUCT_FILE
} }
@ -6221,6 +6226,8 @@ INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) { INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
void *ctx; void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp); COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
if (fp)
unpoison_file(fp);
int res = REAL(fflush)(fp); int res = REAL(fflush)(fp);
// FIXME: handle fp == NULL // FIXME: handle fp == NULL
if (fp) { if (fp) {
@ -6240,6 +6247,8 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp); COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
const FileMetadata *m = GetInterceptorMetadata(fp); const FileMetadata *m = GetInterceptorMetadata(fp);
if (fp)
unpoison_file(fp);
int res = REAL(fclose)(fp); int res = REAL(fclose)(fp);
if (m) { if (m) {
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size); COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);

View File

@ -330,13 +330,17 @@ static void ioctl_table_fill() {
_(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int)); _(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
_(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int)); _(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
_(TCFLSH, NONE, 0); _(TCFLSH, NONE, 0);
#if SANITIZER_GLIBC
_(TCGETA, WRITE, struct_termio_sz); _(TCGETA, WRITE, struct_termio_sz);
#endif
_(TCGETS, WRITE, struct_termios_sz); _(TCGETS, WRITE, struct_termios_sz);
_(TCSBRK, NONE, 0); _(TCSBRK, NONE, 0);
_(TCSBRKP, NONE, 0); _(TCSBRKP, NONE, 0);
#if SANITIZER_GLIBC
_(TCSETA, READ, struct_termio_sz); _(TCSETA, READ, struct_termio_sz);
_(TCSETAF, READ, struct_termio_sz); _(TCSETAF, READ, struct_termio_sz);
_(TCSETAW, READ, struct_termio_sz); _(TCSETAW, READ, struct_termio_sz);
#endif
_(TCSETS, READ, struct_termios_sz); _(TCSETS, READ, struct_termios_sz);
_(TCSETSF, READ, struct_termios_sz); _(TCSETSF, READ, struct_termios_sz);
_(TCSETSW, READ, struct_termios_sz); _(TCSETSW, READ, struct_termios_sz);
@ -364,7 +368,7 @@ static void ioctl_table_fill() {
_(VT_WAITACTIVE, NONE, 0); _(VT_WAITACTIVE, NONE, 0);
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_GLIBC
// _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE // _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
_(CYGETDEFTHRESH, WRITE, sizeof(int)); _(CYGETDEFTHRESH, WRITE, sizeof(int));
_(CYGETDEFTIMEOUT, WRITE, sizeof(int)); _(CYGETDEFTIMEOUT, WRITE, sizeof(int));

View File

@ -1,6 +1,7 @@
#if defined(__aarch64__) && defined(__linux__) #if defined(__aarch64__) && defined(__linux__)
#include "sanitizer_common/sanitizer_asm.h" #include "sanitizer_common/sanitizer_asm.h"
#include "builtins/assembly.h"
ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA) ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
@ -9,6 +10,7 @@ ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork): ASM_WRAPPER_NAME(vfork):
// Save x30 in the off-stack spill area. // Save x30 in the off-stack spill area.
hint #25 // paciasp
stp xzr, x30, [sp, #-16]! stp xzr, x30, [sp, #-16]!
bl COMMON_INTERCEPTOR_SPILL_AREA bl COMMON_INTERCEPTOR_SPILL_AREA
ldp xzr, x30, [sp], 16 ldp xzr, x30, [sp], 16
@ -33,6 +35,7 @@ ASM_WRAPPER_NAME(vfork):
bl COMMON_INTERCEPTOR_SPILL_AREA bl COMMON_INTERCEPTOR_SPILL_AREA
ldr x30, [x0] ldr x30, [x0]
ldp x0, xzr, [sp], 16 ldp x0, xzr, [sp], 16
hint #29 // autiasp
ret ret
ASM_SIZE(vfork) ASM_SIZE(vfork)
@ -40,4 +43,6 @@ ASM_SIZE(vfork)
.weak vfork .weak vfork
.set vfork, ASM_WRAPPER_NAME(vfork) .set vfork, ASM_WRAPPER_NAME(vfork)
GNU_PROPERTY_BTI_PAC
#endif #endif

View File

@ -13,6 +13,7 @@ INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback) INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path) INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd) INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container) INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print) INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary) INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)

View File

@ -92,14 +92,13 @@ void *BackgroundThread(void *arg) {
#endif #endif
void WriteToSyslog(const char *msg) { void WriteToSyslog(const char *msg) {
InternalScopedString msg_copy(kErrorMessageBufferSize); InternalScopedString msg_copy;
msg_copy.append("%s", msg); msg_copy.append("%s", msg);
char *p = msg_copy.data(); const char *p = msg_copy.data();
char *q;
// Print one line at a time. // Print one line at a time.
// syslog, at least on Android, has an implicit message length limit. // syslog, at least on Android, has an implicit message length limit.
while ((q = internal_strchr(p, '\n'))) { while (char* q = internal_strchr(p, '\n')) {
*q = '\0'; *q = '\0';
WriteOneLineToSyslog(p); WriteOneLineToSyslog(p);
p = q + 1; p = q + 1;

View File

@ -58,6 +58,9 @@ void ReportFile::ReopenIfNecessary() {
} else { } else {
internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid); internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
} }
if (common_flags()->log_suffix) {
internal_strlcat(full_path, common_flags()->log_suffix, kMaxPathLength);
}
error_t err; error_t err;
fd = OpenFile(full_path, WrOnly, &err); fd = OpenFile(full_path, WrOnly, &err);
if (fd == kInvalidFd) { if (fd == kInvalidFd) {
@ -95,6 +98,12 @@ void ReportFile::SetReportPath(const char *path) {
} }
} }
// Returns the current report file path. Reopens the report file first so
// that full_path has been computed (pid/suffix substitution happens there).
// Holds mu for the duration of the call.
const char *ReportFile::GetReportPath() {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  return full_path;
}
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) { uptr *read_len, uptr max_len, error_t *errno_p) {
*buff = nullptr; *buff = nullptr;
@ -213,6 +222,10 @@ void __sanitizer_set_report_fd(void *fd) {
report_file.fd = (fd_t)reinterpret_cast<uptr>(fd); report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
report_file.fd_pid = internal_getpid(); report_file.fd_pid = internal_getpid();
} }
// Public interface: exposes the report file path to instrumented programs.
const char *__sanitizer_get_report_path() {
  return report_file.GetReportPath();
}
} // extern "C" } // extern "C"
#endif // !SANITIZER_FUCHSIA #endif // !SANITIZER_FUCHSIA

View File

@ -26,6 +26,7 @@ struct ReportFile {
void Write(const char *buffer, uptr length); void Write(const char *buffer, uptr length);
bool SupportsColors(); bool SupportsColors();
void SetReportPath(const char *path); void SetReportPath(const char *path);
const char *GetReportPath();
// Don't use fields directly. They are only declared public to allow // Don't use fields directly. They are only declared public to allow
// aggregate initialization. // aggregate initialization.

View File

@ -35,6 +35,7 @@ void CommonFlags::CopyFrom(const CommonFlags &other) {
// Copy the string from "s" to "out", making the following substitutions: // Copy the string from "s" to "out", making the following substitutions:
// %b = binary basename // %b = binary basename
// %p = pid // %p = pid
// %d = binary directory
void SubstituteForFlagValue(const char *s, char *out, uptr out_size) { void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
char *out_end = out + out_size; char *out_end = out + out_size;
while (*s && out < out_end - 1) { while (*s && out < out_end - 1) {
@ -64,6 +65,12 @@ void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
s += 2; // skip "%p" s += 2; // skip "%p"
break; break;
} }
case 'd': {
uptr len = ReadBinaryDir(out, out_end - out);
out += len;
s += 2; // skip "%d"
break;
}
default: default:
*out++ = *s++; *out++ = *s++;
break; break;

View File

@ -59,6 +59,8 @@ COMMON_FLAG(
bool, log_exe_name, false, bool, log_exe_name, false,
"Mention name of executable when reporting error and " "Mention name of executable when reporting error and "
"append executable name to logs (as in \"log_path.exe_name.pid\").") "append executable name to logs (as in \"log_path.exe_name.pid\").")
COMMON_FLAG(const char *, log_suffix, nullptr,
"String to append to log file name, e.g. \".txt\".")
COMMON_FLAG( COMMON_FLAG(
bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC, bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
"Write all sanitizer output to syslog in addition to other means of " "Write all sanitizer output to syslog in addition to other means of "

View File

@ -14,7 +14,6 @@
#include "sanitizer_fuchsia.h" #include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA #if SANITIZER_FUCHSIA
#include <limits.h>
#include <pthread.h> #include <pthread.h>
#include <stdlib.h> #include <stdlib.h>
#include <unistd.h> #include <unistd.h>
@ -69,9 +68,7 @@ uptr internal_getpid() {
return pid; return pid;
} }
int internal_dlinfo(void *handle, int request, void *p) { int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }
UNIMPLEMENTED();
}
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); } uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
@ -153,9 +150,9 @@ void BlockingMutex::CheckLocked() {
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
} }
uptr GetPageSize() { return PAGE_SIZE; } uptr GetPageSize() { return _zx_system_get_page_size(); }
uptr GetMmapGranularity() { return PAGE_SIZE; } uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
sanitizer_shadow_bounds_t ShadowBounds; sanitizer_shadow_bounds_t ShadowBounds;
@ -168,7 +165,7 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type, static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) { bool raw_report, bool die_for_nomem) {
size = RoundUpTo(size, PAGE_SIZE); size = RoundUpTo(size, GetPageSize());
zx_handle_t vmo; zx_handle_t vmo;
zx_status_t status = _zx_vmo_create(size, 0, &vmo); zx_status_t status = _zx_vmo_create(size, 0, &vmo);
@ -214,15 +211,14 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
uptr ReservedAddressRange::Init(uptr init_size, const char *name, uptr ReservedAddressRange::Init(uptr init_size, const char *name,
uptr fixed_addr) { uptr fixed_addr) {
init_size = RoundUpTo(init_size, PAGE_SIZE); init_size = RoundUpTo(init_size, GetPageSize());
DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID); DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
uintptr_t base; uintptr_t base;
zx_handle_t vmar; zx_handle_t vmar;
zx_status_t status = zx_status_t status = _zx_vmar_allocate(
_zx_vmar_allocate( _zx_vmar_root_self(),
_zx_vmar_root_self(), ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, init_size, &vmar, &base);
0, init_size, &vmar, &base);
if (status != ZX_OK) if (status != ZX_OK)
ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status); ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
base_ = reinterpret_cast<void *>(base); base_ = reinterpret_cast<void *>(base);
@ -236,7 +232,7 @@ uptr ReservedAddressRange::Init(uptr init_size, const char *name,
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size, static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
void *base, const char *name, bool die_for_nomem) { void *base, const char *name, bool die_for_nomem) {
uptr offset = fixed_addr - reinterpret_cast<uptr>(base); uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
map_size = RoundUpTo(map_size, PAGE_SIZE); map_size = RoundUpTo(map_size, GetPageSize());
zx_handle_t vmo; zx_handle_t vmo;
zx_status_t status = _zx_vmo_create(map_size, 0, &vmo); zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
if (status != ZX_OK) { if (status != ZX_OK) {
@ -264,19 +260,19 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size, uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
const char *name) { const char *name) {
return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
name_, false); false);
} }
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size, uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
const char *name) { const char *name) {
return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
name_, true);
} }
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) { void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
if (!addr || !size) return; if (!addr || !size)
size = RoundUpTo(size, PAGE_SIZE); return;
size = RoundUpTo(size, GetPageSize());
zx_status_t status = zx_status_t status =
_zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size); _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
@ -316,7 +312,7 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) { const char *mem_type) {
CHECK_GE(size, PAGE_SIZE); CHECK_GE(size, GetPageSize());
CHECK(IsPowerOfTwo(size)); CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment)); CHECK(IsPowerOfTwo(alignment));
@ -356,7 +352,8 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
_zx_vmar_root_self(), _zx_vmar_root_self(),
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
addr - info.base, vmo, 0, size, &new_addr); addr - info.base, vmo, 0, size, &new_addr);
if (status == ZX_OK) CHECK_EQ(new_addr, addr); if (status == ZX_OK)
CHECK_EQ(new_addr, addr);
} }
} }
if (status == ZX_OK && addr != map_addr) if (status == ZX_OK && addr != map_addr)
@ -381,9 +378,18 @@ void UnmapOrDie(void *addr, uptr size) {
UnmapOrDieVmar(addr, size, _zx_vmar_root_self()); UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
} }
// This is used on the shadow mapping, which cannot be changed. void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
// Zircon doesn't have anything like MADV_DONTNEED. uptr beg_aligned = RoundUpTo(beg, GetPageSize());
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {} uptr end_aligned = RoundDownTo(end, GetPageSize());
if (beg_aligned < end_aligned) {
zx_handle_t root_vmar = _zx_vmar_root_self();
CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
zx_status_t status =
_zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
end_aligned - beg_aligned, nullptr, 0);
CHECK_EQ(status, ZX_OK);
}
}
void DumpProcessMap() { void DumpProcessMap() {
// TODO(mcgrathr): write it // TODO(mcgrathr): write it
@ -412,8 +418,9 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uint64_t vmo_size; uint64_t vmo_size;
status = _zx_vmo_get_size(vmo, &vmo_size); status = _zx_vmo_get_size(vmo, &vmo_size);
if (status == ZX_OK) { if (status == ZX_OK) {
if (vmo_size < max_len) max_len = vmo_size; if (vmo_size < max_len)
size_t map_size = RoundUpTo(max_len, PAGE_SIZE); max_len = vmo_size;
size_t map_size = RoundUpTo(max_len, GetPageSize());
uintptr_t addr; uintptr_t addr;
status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0, status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
map_size, &addr); map_size, &addr);
@ -425,7 +432,8 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
} }
_zx_handle_close(vmo); _zx_handle_close(vmo);
} }
if (status != ZX_OK && errno_p) *errno_p = status; if (status != ZX_OK && errno_p)
*errno_p = status;
return status == ZX_OK; return status == ZX_OK;
} }
@ -499,9 +507,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking) {
return true; return true;
} }
u32 GetNumberOfCPUs() { u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
return zx_system_get_num_cpus();
}
uptr GetRSS() { UNIMPLEMENTED(); } uptr GetRSS() { UNIMPLEMENTED(); }
@ -529,6 +535,10 @@ void __sanitizer_set_report_path(const char *path) {
void __sanitizer_set_report_fd(void *fd) { void __sanitizer_set_report_fd(void *fd) {
UNREACHABLE("not available on Fuchsia"); UNREACHABLE("not available on Fuchsia");
} }
const char *__sanitizer_get_report_path() {
UNREACHABLE("not available on Fuchsia");
}
} // extern "C" } // extern "C"
#endif // SANITIZER_FUCHSIA #endif // SANITIZER_FUCHSIA

View File

@ -28,6 +28,10 @@ extern "C" {
// (casted to void *). // (casted to void *).
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(void *fd); void __sanitizer_set_report_fd(void *fd);
// Get the current full report file path, if a path was specified by
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
SANITIZER_INTERFACE_ATTRIBUTE
const char *__sanitizer_get_report_path();
typedef struct { typedef struct {
int coverage_sandboxed; int coverage_sandboxed;

View File

@ -409,6 +409,9 @@ inline void Trap() {
(void)enable_fp; \ (void)enable_fp; \
} while (0) } while (0)
constexpr u32 kInvalidTid = -1;
constexpr u32 kMainTid = 0;
} // namespace __sanitizer } // namespace __sanitizer
namespace __asan { namespace __asan {

View File

@ -38,7 +38,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
void LibIgnore::OnLibraryLoaded(const char *name) { void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_); BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target. // Try to match suppressions with symlink target.
InternalScopedString buf(kMaxPathLength); InternalMmapVector<char> buf(kMaxPathLength);
if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 && if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
buf[0]) { buf[0]) {
for (uptr i = 0; i < count_; i++) { for (uptr i = 0; i < count_; i++) {

View File

@ -183,6 +183,14 @@ uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length); return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
} }
#if SANITIZER_LINUX
uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
void *new_address) {
return internal_syscall(SYSCALL(mremap), (uptr)old_address, old_size,
new_size, flags, (uptr)new_address);
}
#endif
int internal_mprotect(void *addr, uptr length, int prot) { int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot); return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
} }
@ -489,22 +497,24 @@ int TgKill(pid_t pid, tid_t tid, int sig) {
} }
#endif #endif
#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD #if SANITIZER_GLIBC
u64 NanoTime() { u64 NanoTime() {
#if SANITIZER_FREEBSD
timeval tv;
#else
kernel_timeval tv; kernel_timeval tv;
#endif
internal_memset(&tv, 0, sizeof(tv)); internal_memset(&tv, 0, sizeof(tv));
internal_syscall(SYSCALL(gettimeofday), &tv, 0); internal_syscall(SYSCALL(gettimeofday), &tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;
} }
// Used by real_clock_gettime.
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) { uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
return internal_syscall(SYSCALL(clock_gettime), clk_id, tp); return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
} }
#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD #elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
u64 NanoTime() {
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return (u64)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}
#endif
// Like getenv, but reads env directly from /proc (on Linux) or parses the // Like getenv, but reads env directly from /proc (on Linux) or parses the
// 'environ' array (on some others) and does not use libc. This function // 'environ' array (on some others) and does not use libc. This function
@ -1334,50 +1344,42 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
#elif SANITIZER_RISCV64 #elif SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) { int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
if (!fn || !child_stack) if (!fn || !child_stack)
return -EINVAL; return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
((unsigned long long *)child_stack)[0] = (uptr)fn;
((unsigned long long *)child_stack)[1] = (uptr)arg;
register int (*__fn)(void *) __asm__("a0") = fn; CHECK_EQ(0, (uptr)child_stack % 16);
register int res __asm__("a0");
register int __flags __asm__("a0") = flags;
register void *__stack __asm__("a1") = child_stack; register void *__stack __asm__("a1") = child_stack;
register int __flags __asm__("a2") = flags; register int *__ptid __asm__("a2") = parent_tidptr;
register void *__arg __asm__("a3") = arg; register void *__tls __asm__("a3") = newtls;
register int *__ptid __asm__("a4") = parent_tidptr; register int *__ctid __asm__("a4") = child_tidptr;
register void *__tls __asm__("a5") = newtls; register int (*__fn)(void *) __asm__("a5") = fn;
register int *__ctid __asm__("a6") = child_tidptr; register void *__arg __asm__("a6") = arg;
register int nr_clone __asm__("a7") = __NR_clone;
__asm__ __volatile__( __asm__ __volatile__(
"mv a0,a2\n" /* flags */
"mv a2,a4\n" /* ptid */
"mv a3,a5\n" /* tls */
"mv a4,a6\n" /* ctid */
"addi a7, zero, %9\n" /* clone */
"ecall\n" "ecall\n"
/* if (%r0 != 0) /* if (a0 != 0)
* return %r0; * return a0;
*/ */
"bnez a0, 1f\n" "bnez a0, 1f\n"
/* In the child, now. Call "fn(arg)". */ // In the child, now. Call "fn(arg)".
"ld a0, 8(sp)\n" "mv a0, a6\n"
"ld a1, 16(sp)\n" "jalr a5\n"
"jalr a1\n"
/* Call _exit(%r0). */ // Call _exit(a0).
"addi a7, zero, %10\n" "addi a7, zero, %9\n"
"ecall\n" "ecall\n"
"1:\n" "1:\n"
: "=r"(res) : "=r"(res)
: "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), : "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__tls), "r"(__ctid),
"r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit) "r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
: "ra", "memory"); : "memory");
return res; return res;
} }
#elif defined(__aarch64__) #elif defined(__aarch64__)

View File

@ -49,7 +49,9 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss); uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset); __sanitizer_sigset_t *oldset);
#if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp); uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
#endif
// Linux-only syscalls. // Linux-only syscalls.
#if SANITIZER_LINUX #if SANITIZER_LINUX
@ -96,7 +98,6 @@ class ThreadLister {
// Exposed for testing. // Exposed for testing.
uptr ThreadDescriptorSize(); uptr ThreadDescriptorSize();
uptr ThreadSelf(); uptr ThreadSelf();
uptr ThreadSelfOffset();
// Matches a library's file name against a base name (stripping path and version // Matches a library's file name against a base name (stripping path and version
// information). // information).

View File

@ -36,6 +36,7 @@
#include <link.h> #include <link.h>
#include <pthread.h> #include <pthread.h>
#include <signal.h> #include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h> #include <sys/resource.h>
#include <syslog.h> #include <syslog.h>
@ -48,6 +49,10 @@
#include <osreldate.h> #include <osreldate.h>
#include <sys/sysctl.h> #include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np #define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif #endif
#if SANITIZER_NETBSD #if SANITIZER_NETBSD
@ -183,85 +188,35 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
#endif #endif
} }
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \ // True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
!SANITIZER_NETBSD && !SANITIZER_SOLARIS // #19826) so dlpi_tls_data cannot be used.
static uptr g_tls_size; //
// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to
#ifdef __i386__ // the TLS initialization image
#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27)) // https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
#else __attribute__((unused)) static int g_use_dlpi_tls_data;
#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#endif
#if CHECK_GET_TLS_STATIC_INFO_VERSION
#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
#define DL_INTERNAL_FUNCTION
#endif
namespace {
struct GetTlsStaticInfoCall {
typedef void (*get_tls_func)(size_t*, size_t*);
};
struct GetTlsStaticInfoRegparmCall {
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
};
template <typename T>
void CallGetTls(void* ptr, size_t* size, size_t* align) {
typename T::get_tls_func get_tls;
CHECK_EQ(sizeof(get_tls), sizeof(ptr));
internal_memcpy(&get_tls, &ptr, sizeof(ptr));
CHECK_NE(get_tls, 0);
get_tls(size, align);
}
bool CmpLibcVersion(int major, int minor, int patch) {
int ma;
int mi;
int pa;
if (!GetLibcVersion(&ma, &mi, &pa))
return false;
if (ma > major)
return true;
if (ma < major)
return false;
if (mi > minor)
return true;
if (mi < minor)
return false;
return pa >= patch;
}
} // namespace
#if SANITIZER_GLIBC && !SANITIZER_GO
__attribute__((unused)) static uptr g_tls_size;
void InitTlsSize() { void InitTlsSize() {
// all current supported platforms have 16 bytes stack alignment int major, minor, patch;
const size_t kStackAlign = 16; g_use_dlpi_tls_data =
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
size_t tls_size = 0;
size_t tls_align = 0; #if defined(__x86_64__) || defined(__powerpc64__)
// On i?86, _dl_get_tls_static_info used to be internal_function, i.e. void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
// __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal size_t tls_align;
// function in 2.27 and later. ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0)) #endif
CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
&tls_size, &tls_align);
else
CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
&tls_size, &tls_align);
if (tls_align < kStackAlign)
tls_align = kStackAlign;
g_tls_size = RoundUpTo(tls_size, tls_align);
} }
#else #else
void InitTlsSize() { } void InitTlsSize() { }
#endif #endif // SANITIZER_GLIBC && !SANITIZER_GO
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \ // On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \ // of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
defined(__arm__) || SANITIZER_RISCV64) && \ // to get the pointer to thread-specific data keys in the thread control block.
SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc. // sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size; static atomic_uintptr_t thread_descriptor_size;
@ -294,9 +249,18 @@ uptr ThreadDescriptorSize() {
val = FIRST_32_SECOND_64(1168, 2288); val = FIRST_32_SECOND_64(1168, 2288);
else if (minor <= 14) else if (minor <= 14)
val = FIRST_32_SECOND_64(1168, 2304); val = FIRST_32_SECOND_64(1168, 2304);
else else if (minor < 32) // Unknown version
val = FIRST_32_SECOND_64(1216, 2304); val = FIRST_32_SECOND_64(1216, 2304);
else // minor == 32
val = FIRST_32_SECOND_64(1344, 2496);
} }
#elif defined(__s390__) || defined(__sparc__)
// The size of a prefix of TCB including pthread::{specific_1stblock,specific}
// suffices. Just return offsetof(struct pthread, specific_used), which hasn't
// changed since 2007-05. Technically this applies to i386/x86_64 as well but
// we call _dl_get_tls_static_info and need the precise size of struct
// pthread.
return FIRST_32_SECOND_64(524, 1552);
#elif defined(__mips__) #elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions. // TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776); val = FIRST_32_SECOND_64(1152, 1776);
@ -320,21 +284,12 @@ uptr ThreadDescriptorSize() {
val = 1776; val = 1776;
#elif defined(__powerpc64__) #elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21 val = 1776; // from glibc.ppc64le 2.20-8.fc21
#elif defined(__s390__)
val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
#endif #endif
if (val) if (val)
atomic_store_relaxed(&thread_descriptor_size, val); atomic_store_relaxed(&thread_descriptor_size, val);
return val; return val;
} }
// The offset at which pointer to self is located in the thread descriptor.
const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
uptr ThreadSelfOffset() {
return kThreadSelfOffset;
}
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 #if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb // TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks. // head structure. It lies before the static tls blocks.
@ -353,68 +308,74 @@ static uptr TlsPreTcbSize() {
} }
#endif #endif
uptr ThreadSelf() { #if !SANITIZER_GO
uptr descr_addr; namespace {
#if defined(__i386__) struct TlsBlock {
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset)); uptr begin, end, align;
#elif defined(__x86_64__) size_t tls_modid;
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset)); bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }
#elif defined(__mips__) };
// MIPS uses TLS variant I. The thread pointer (in hardware register $29) } // namespace
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
// TCB and the size of pthread_descr.
const uptr kTlsTcbOffset = 0x7000;
uptr thread_pointer;
asm volatile(".set push;\
.set mips64r2;\
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
#elif defined(__aarch64__) || defined(__arm__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
#elif SANITIZER_RISCV64
// https://github.com/riscv/riscv-elf-psabi-doc/issues/53
uptr thread_pointer = reinterpret_cast<uptr>(__builtin_thread_pointer());
descr_addr = thread_pointer - TlsPreTcbSize();
#elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
#elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
// TCB and the size of pthread_descr.
const uptr kTlsTcbOffset = 0x7000;
uptr thread_pointer;
asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
descr_addr = thread_pointer - TlsPreTcbSize();
#else
#error "unsupported CPU arch"
#endif
return descr_addr;
}
#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
#if SANITIZER_FREEBSD extern "C" void *__tls_get_addr(size_t *);
static void **ThreadSelfSegbase() {
void **segbase = 0; static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
#if defined(__i386__) void *data) {
// sysarch(I386_GET_GSBASE, segbase); if (!info->dlpi_tls_modid)
__asm __volatile("mov %%gs:0, %0" : "=r" (segbase)); return 0;
#elif defined(__x86_64__) uptr begin = (uptr)info->dlpi_tls_data;
// sysarch(AMD64_GET_FSBASE, segbase); #ifndef __s390__
__asm __volatile("movq %%fs:0, %0" : "=r" (segbase)); if (!g_use_dlpi_tls_data) {
#else // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
#error "unsupported CPU arch" // and FreeBSD.
size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
}
#endif #endif
return segbase; for (unsigned i = 0; i != info->dlpi_phnum; ++i)
if (info->dlpi_phdr[i].p_type == PT_TLS) {
static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});
break;
}
return 0;
} }
uptr ThreadSelf() { __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
return (uptr)ThreadSelfSegbase()[2]; uptr *align) {
InternalMmapVector<TlsBlock> ranges;
dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
uptr len = ranges.size();
Sort(ranges.begin(), len);
// Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,
// this module is guaranteed to exist and is one of the initially loaded
// modules.
uptr one = 0;
while (one != len && ranges[one].tls_modid != 1) ++one;
if (one == len) {
// This may happen with musl if no module uses PT_TLS.
*addr = 0;
*size = 0;
*align = 1;
return;
}
// Find the maximum consecutive ranges. We consider two modules consecutive if
// the gap is smaller than the alignment. The dynamic loader places static TLS
// blocks this way not to waste space.
uptr l = one;
*align = ranges[l].align;
while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
*align = Max(*align, ranges[--l].align);
uptr r = one + 1;
while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
*align = Max(*align, ranges[r++].align);
*addr = ranges[l].begin;
*size = ranges[r - 1].end - ranges[l].begin;
} }
#endif // SANITIZER_FREEBSD #endif // !SANITIZER_GO
#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
// SANITIZER_LINUX) && !SANITIZER_ANDROID
#if SANITIZER_NETBSD #if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() { static struct tls_tcb * ThreadSelfTlsTcb() {
@ -465,33 +426,67 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0; *addr = 0;
*size = 0; *size = 0;
} }
#elif SANITIZER_LINUX #elif SANITIZER_GLIBC && defined(__x86_64__)
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) // For x86-64, use an O(1) approach which requires precise
*addr = ThreadSelf(); // ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
*size = GetTlsSize(); asm("mov %%fs:16,%0" : "=r"(*addr));
*size = g_tls_size;
*addr -= *size; *addr -= *size;
*addr += ThreadDescriptorSize(); *addr += ThreadDescriptorSize();
#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \ #elif SANITIZER_GLIBC && defined(__powerpc64__)
defined(__arm__) || SANITIZER_RISCV64 // Workaround for glibc<2.25(?). 2.27 is known to not need this.
*addr = ThreadSelf(); uptr tp;
*size = GetTlsSize(); asm("addi %0,13,-0x7000" : "=r"(tp));
const uptr pre_tcb_size = TlsPreTcbSize();
*addr = tp - pre_tcb_size;
*size = g_tls_size + pre_tcb_size;
#elif SANITIZER_FREEBSD || SANITIZER_LINUX
uptr align;
GetStaticTlsBoundary(addr, size, &align);
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
defined(__sparc__)
if (SANITIZER_GLIBC) {
#if defined(__x86_64__) || defined(__i386__)
align = Max<uptr>(align, 64);
#else #else
*addr = 0; align = Max<uptr>(align, 16);
*size = 0;
#endif #endif
#elif SANITIZER_FREEBSD
void** segbase = ThreadSelfSegbase();
*addr = 0;
*size = 0;
if (segbase != 0) {
// tcbalign = 16
// tls_size = round(tls_static_space, tcbalign);
// dtv = segbase[1];
// dtv[2] = segbase - tls_static_space;
void **dtv = (void**) segbase[1];
*addr = (uptr) dtv[2];
*size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
} }
const uptr tp = RoundUpTo(*addr + *size, align);
// lsan requires the range to additionally cover the static TLS surplus
// (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
// allocations only referenced by tls in dynamically loaded modules.
if (SANITIZER_GLIBC)
*size += 1644;
else if (SANITIZER_FREEBSD)
*size += 128; // RTLD_STATIC_TLS_EXTRA
// Extend the range to include the thread control block. On glibc, lsan needs
// the range to include pthread::{specific_1stblock,specific} so that
// allocations only referenced by pthread_setspecific can be scanned. This may
// underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
// because the number of bytes after pthread::specific is larger.
*addr = tp - RoundUpTo(*size, align);
*size = tp - *addr + ThreadDescriptorSize();
#else
if (SANITIZER_GLIBC)
*size += 1664;
else if (SANITIZER_FREEBSD)
*size += 128; // RTLD_STATIC_TLS_EXTRA
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
const uptr pre_tcb_size = TlsPreTcbSize();
*addr -= pre_tcb_size;
*size += pre_tcb_size;
#else
// arm and aarch64 reserve two words at TP, so this underestimates the range.
// However, this is sufficient for the purpose of finding the pointers to
// thread-specific data keys.
const uptr tcb_size = ThreadDescriptorSize();
*addr -= tcb_size;
*size += tcb_size;
#endif
#endif
#elif SANITIZER_NETBSD #elif SANITIZER_NETBSD
struct tls_tcb * const tcb = ThreadSelfTlsTcb(); struct tls_tcb * const tcb = ThreadSelfTlsTcb();
*addr = 0; *addr = 0;
@ -518,15 +513,13 @@ static void GetTls(uptr *addr, uptr *size) {
#if !SANITIZER_GO #if !SANITIZER_GO
uptr GetTlsSize() { uptr GetTlsSize() {
#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \ #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS SANITIZER_SOLARIS
uptr addr, size; uptr addr, size;
GetTls(&addr, &size); GetTls(&addr, &size);
return size; return size;
#elif defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else #else
return g_tls_size; return 0;
#endif #endif
} }
#endif #endif
@ -547,10 +540,9 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
if (!main) { if (!main) {
// If stack and tls intersect, make them non-intersecting. // If stack and tls intersect, make them non-intersecting.
if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) { if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
CHECK_GT(*tls_addr + *tls_size, *stk_addr); if (*stk_addr + *stk_size < *tls_addr + *tls_size)
CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size); *tls_size = *stk_addr + *stk_size - *tls_addr;
*stk_size -= *tls_size; *stk_size = *tls_addr - *stk_addr;
*tls_addr = *stk_addr + *stk_size;
} }
} }
#endif #endif
@ -569,20 +561,12 @@ struct DlIteratePhdrData {
bool first; bool first;
}; };
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
DlIteratePhdrData *data = (DlIteratePhdrData*)arg; InternalMmapVectorNoCtor<LoadedModule> *modules) {
InternalScopedString module_name(kMaxPathLength);
if (data->first) {
data->first = false;
// First module is the binary itself.
ReadBinaryNameCached(module_name.data(), module_name.size());
} else if (info->dlpi_name) {
module_name.append("%s", info->dlpi_name);
}
if (module_name[0] == '\0') if (module_name[0] == '\0')
return 0; return 0;
LoadedModule cur_module; LoadedModule cur_module;
cur_module.set(module_name.data(), info->dlpi_addr); cur_module.set(module_name, info->dlpi_addr);
for (int i = 0; i < (int)info->dlpi_phnum; i++) { for (int i = 0; i < (int)info->dlpi_phnum; i++) {
const Elf_Phdr *phdr = &info->dlpi_phdr[i]; const Elf_Phdr *phdr = &info->dlpi_phdr[i];
if (phdr->p_type == PT_LOAD) { if (phdr->p_type == PT_LOAD) {
@ -594,7 +578,26 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
writable); writable);
} }
} }
data->modules->push_back(cur_module); modules->push_back(cur_module);
return 0;
}
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
DlIteratePhdrData *data = (DlIteratePhdrData *)arg;
if (data->first) {
InternalMmapVector<char> module_name(kMaxPathLength);
data->first = false;
// First module is the binary itself.
ReadBinaryNameCached(module_name.data(), module_name.size());
return AddModuleSegments(module_name.data(), info, data->modules);
}
if (info->dlpi_name) {
InternalScopedString module_name;
module_name.append("%s", info->dlpi_name);
return AddModuleSegments(module_name.data(), info, data->modules);
}
return 0; return 0;
} }
@ -729,13 +732,9 @@ u32 GetNumberOfCPUs() {
#elif SANITIZER_SOLARIS #elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN); return sysconf(_SC_NPROCESSORS_ONLN);
#else #else
#if defined(CPU_COUNT)
cpu_set_t CPUs; cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0); CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs); return CPU_COUNT(&CPUs);
#else
return 1;
#endif
#endif #endif
} }
@ -802,20 +801,13 @@ void LogMessageOnPrintf(const char *str) {
#endif // SANITIZER_LINUX #endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_GO #if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the // glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is // vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null // initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime. // and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname; extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
inline bool CanUseVDSO() { inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }
// Bionic is safe, it checks for the vDSO function pointers to be initialized.
if (SANITIZER_ANDROID)
return true;
if (&__progname && __progname && *__progname)
return true;
return false;
}
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling // MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is // clock_gettime. real_clock_gettime only exists if clock_gettime is
@ -835,13 +827,13 @@ u64 MonotonicNanoTime() {
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec; return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
} }
#else #else
// Non-Linux & Go always use the syscall. // Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() { u64 MonotonicNanoTime() {
timespec ts; timespec ts;
internal_clock_gettime(CLOCK_MONOTONIC, &ts); clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec; return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
} }
#endif // SANITIZER_LINUX && !SANITIZER_GO #endif // SANITIZER_GLIBC && !SANITIZER_GO
void ReExec() { void ReExec() {
const char *pathname = "/proc/self/exe"; const char *pathname = "/proc/self/exe";
@ -910,6 +902,65 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
return shadow_start; return shadow_start;
} }
static uptr MmapSharedNoReserve(uptr addr, uptr size) {
return internal_mmap(
reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}
static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
uptr alias_size) {
#if SANITIZER_LINUX
return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
MREMAP_MAYMOVE | MREMAP_FIXED,
reinterpret_cast<void *>(alias_addr));
#else
CHECK(false && "mremap is not supported outside of Linux");
return 0;
#endif
}
static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
uptr total_size = alias_size * num_aliases;
uptr mapped = MmapSharedNoReserve(start_addr, total_size);
CHECK_EQ(mapped, start_addr);
for (uptr i = 1; i < num_aliases; ++i) {
uptr alias_addr = start_addr + i * alias_size;
CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);
}
}
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
uptr num_aliases, uptr ring_buffer_size) {
CHECK_EQ(alias_size & (alias_size - 1), 0);
CHECK_EQ(num_aliases & (num_aliases - 1), 0);
CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);
const uptr granularity = GetMmapGranularity();
shadow_size = RoundUpTo(shadow_size, granularity);
CHECK_EQ(shadow_size & (shadow_size - 1), 0);
const uptr alias_region_size = alias_size * num_aliases;
const uptr alignment =
2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);
const uptr left_padding = ring_buffer_size;
const uptr right_size = alignment;
const uptr map_size = left_padding + 2 * alignment;
const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));
CHECK_NE(map_start, static_cast<uptr>(-1));
const uptr right_start = RoundUpTo(map_start + left_padding, alignment);
UnmapFromTo(map_start, right_start - left_padding);
UnmapFromTo(right_start + right_size, map_start + map_size);
CreateAliases(right_start + right_size / 2, alias_size, num_aliases);
return right_start;
}
void InitializePlatformCommonFlags(CommonFlags *cf) { void InitializePlatformCommonFlags(CommonFlags *cf) {
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds == nullptr) if (&__libc_get_static_tls_bounds == nullptr)

View File

@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// //
// `LocalAddressSpaceView` provides the local (i.e. target and current address // `LocalAddressSpaceView` provides the local (i.e. target and current address
// space are the same) implementation of the `AddressSpaveView` interface which // space are the same) implementation of the `AddressSpaceView` interface which
// provides a simple interface to load memory from another process (i.e. // provides a simple interface to load memory from another process (i.e.
// out-of-process) // out-of-process)
// //

View File

@ -37,13 +37,21 @@
extern char **environ; extern char **environ;
#endif #endif
#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__) #if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1 #define SANITIZER_OS_TRACE 1
#include <os/trace.h> #include <os/trace.h>
#else #else
#define SANITIZER_OS_TRACE 0 #define SANITIZER_OS_TRACE 0
#endif #endif
// import new crash reporting api
#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
#define HAVE_CRASHREPORTERCLIENT_H 1
#include <CrashReporterClient.h>
#else
#define HAVE_CRASHREPORTERCLIENT_H 0
#endif
#if !SANITIZER_IOS #if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron #include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else #else
@ -62,6 +70,7 @@ extern "C" {
#include <mach/mach_time.h> #include <mach/mach_time.h>
#include <mach/vm_statistics.h> #include <mach/vm_statistics.h>
#include <malloc/malloc.h> #include <malloc/malloc.h>
#include <os/log.h>
#include <pthread.h> #include <pthread.h>
#include <sched.h> #include <sched.h>
#include <signal.h> #include <signal.h>
@ -133,6 +142,12 @@ uptr internal_munmap(void *addr, uptr length) {
return munmap(addr, length); return munmap(addr, length);
} }
uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
void *new_address) {
CHECK(false && "internal_mremap is unimplemented on Mac");
return 0;
}
int internal_mprotect(void *addr, uptr length, int prot) { int internal_mprotect(void *addr, uptr length, int prot) {
return mprotect(addr, length, prot); return mprotect(addr, length, prot);
} }
@ -444,7 +459,7 @@ uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
// On OS X the executable path is saved to the stack by dyld. Reading it // On OS X the executable path is saved to the stack by dyld. Reading it
// from there is much faster than calling dladdr, especially for large // from there is much faster than calling dladdr, especially for large
// binaries with symbols. // binaries with symbols.
InternalScopedString exe_path(kMaxPathLength); InternalMmapVector<char> exe_path(kMaxPathLength);
uint32_t size = exe_path.size(); uint32_t size = exe_path.size();
if (_NSGetExecutablePath(exe_path.data(), &size) == 0 && if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
realpath(exe_path.data(), buf) != 0) { realpath(exe_path.data(), buf) != 0) {
@ -620,6 +635,23 @@ constexpr u16 GetOSMajorKernelOffset() {
using VersStr = char[64]; using VersStr = char[64];
static uptr ApproximateOSVersionViaKernelVersion(VersStr vers) {
u16 kernel_major = GetDarwinKernelVersion().major;
u16 offset = GetOSMajorKernelOffset();
CHECK_GE(kernel_major, offset);
u16 os_major = kernel_major - offset;
const char *format = "%d.0";
if (TARGET_OS_OSX) {
if (os_major >= 16) { // macOS 11+
os_major -= 5;
} else { // macOS 10.15 and below
format = "10.%d";
}
}
return internal_snprintf(vers, sizeof(VersStr), format, os_major);
}
static void GetOSVersion(VersStr vers) { static void GetOSVersion(VersStr vers) {
uptr len = sizeof(VersStr); uptr len = sizeof(VersStr);
if (SANITIZER_IOSSIM) { if (SANITIZER_IOSSIM) {
@ -633,17 +665,19 @@ static void GetOSVersion(VersStr vers) {
} else { } else {
int res = int res =
internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0); internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);
if (res) {
// Fallback for XNU 17 (macOS 10.13) and below that do not provide the
// `kern.osproductversion` property.
u16 kernel_major = GetDarwinKernelVersion().major;
u16 offset = GetOSMajorKernelOffset();
CHECK_LE(kernel_major, 17);
CHECK_GE(kernel_major, offset);
u16 os_major = kernel_major - offset;
auto format = TARGET_OS_OSX ? "10.%d" : "%d.0"; // XNU 17 (macOS 10.13) and below do not provide the sysctl
len = internal_snprintf(vers, len, format, os_major); // `kern.osproductversion` entry (res != 0).
bool no_os_version = res != 0;
// For launchd, sanitizer initialization runs before sysctl is setup
// (res == 0 && len != strlen(vers), vers is not a valid version). However,
// the kernel version `kern.osrelease` is available.
bool launchd = (res == 0 && internal_strlen(vers) < 3);
if (launchd) CHECK_EQ(internal_getpid(), 1);
if (no_os_version || launchd) {
len = ApproximateOSVersionViaKernelVersion(vers);
} }
} }
CHECK_LT(len, sizeof(VersStr)); CHECK_LT(len, sizeof(VersStr));
@ -681,7 +715,7 @@ static void MapToMacos(u16 *major, u16 *minor) {
} }
static MacosVersion GetMacosAlignedVersionInternal() { static MacosVersion GetMacosAlignedVersionInternal() {
VersStr vers; VersStr vers = {};
GetOSVersion(vers); GetOSVersion(vers);
u16 major, minor; u16 major, minor;
@ -707,7 +741,7 @@ MacosVersion GetMacosAlignedVersion() {
} }
DarwinKernelVersion GetDarwinKernelVersion() { DarwinKernelVersion GetDarwinKernelVersion() {
VersStr vers; VersStr vers = {};
uptr len = sizeof(VersStr); uptr len = sizeof(VersStr);
int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0); int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
CHECK_EQ(res, 0); CHECK_EQ(res, 0);
@ -751,7 +785,51 @@ static BlockingMutex syslog_lock(LINKER_INITIALIZED);
void WriteOneLineToSyslog(const char *s) { void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO #if !SANITIZER_GO
syslog_lock.CheckLocked(); syslog_lock.CheckLocked();
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s); if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {
os_log_error(OS_LOG_DEFAULT, "%{public}s", s);
} else {
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
}
#endif
}
// buffer to store crash report application information
static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
extern "C" {
// Integrate with crash reporter libraries.
#if HAVE_CRASHREPORTERCLIENT_H
CRASH_REPORTER_CLIENT_HIDDEN
struct crashreporter_annotations_t gCRAnnotations
__attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = {
CRASHREPORTER_ANNOTATIONS_VERSION,
0,
0,
0,
0,
0,
0,
#if CRASHREPORTER_ANNOTATIONS_VERSION > 4
0,
#endif
};
#else
// fall back to old crashreporter api
static const char *__crashreporter_info__ __attribute__((__used__)) =
&crashreporter_info_buff[0];
asm(".desc ___crashreporter_info__, 0x10");
#endif
} // extern "C"
static void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(crashreporter_info_buff, msg,
sizeof(crashreporter_info_buff));
#if HAVE_CRASHREPORTERCLIENT_H
(void)CRSetCrashLogMessage(crashreporter_info_buff);
#endif #endif
} }
@ -947,7 +1025,7 @@ void MaybeReexec() {
if (DyldNeedsEnvVariable() && !lib_is_in_env) { if (DyldNeedsEnvVariable() && !lib_is_in_env) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library. // library.
InternalScopedString program_name(1024); InternalMmapVector<char> program_name(1024);
uint32_t buf_size = program_name.size(); uint32_t buf_size = program_name.size();
_NSGetExecutablePath(program_name.data(), &buf_size); _NSGetExecutablePath(program_name.data(), &buf_size);
char *new_env = const_cast<char*>(info.dli_fname); char *new_env = const_cast<char*>(info.dli_fname);
@ -1066,7 +1144,7 @@ char **GetArgv() {
return *_NSGetArgv(); return *_NSGetArgv();
} }
#if SANITIZER_IOS #if SANITIZER_IOS && !SANITIZER_IOSSIM
// The task_vm_info struct is normally provided by the macOS SDK, but we need // The task_vm_info struct is normally provided by the macOS SDK, but we need
// fields only available in 10.12+. Declare the struct manually to be able to // fields only available in 10.12+. Declare the struct manually to be able to
// build against older SDKs. // build against older SDKs.
@ -1106,26 +1184,35 @@ static uptr GetTaskInfoMaxAddress() {
uptr GetMaxUserVirtualAddress() { uptr GetMaxUserVirtualAddress() {
static uptr max_vm = GetTaskInfoMaxAddress(); static uptr max_vm = GetTaskInfoMaxAddress();
if (max_vm != 0) if (max_vm != 0) {
return max_vm - 1; const uptr ret_value = max_vm - 1;
CHECK_LE(ret_value, SANITIZER_MMAP_RANGE_SIZE);
return ret_value;
}
// xnu cannot provide vm address limit // xnu cannot provide vm address limit
# if SANITIZER_WORDSIZE == 32 # if SANITIZER_WORDSIZE == 32
return 0xffe00000 - 1; constexpr uptr fallback_max_vm = 0xffe00000 - 1;
# else # else
return 0x200000000 - 1; constexpr uptr fallback_max_vm = 0x200000000 - 1;
# endif # endif
static_assert(fallback_max_vm <= SANITIZER_MMAP_RANGE_SIZE,
"Max virtual address must be less than mmap range size.");
return fallback_max_vm;
} }
#else // !SANITIZER_IOS #else // !SANITIZER_IOS
uptr GetMaxUserVirtualAddress() { uptr GetMaxUserVirtualAddress() {
# if SANITIZER_WORDSIZE == 64 # if SANITIZER_WORDSIZE == 64
return (1ULL << 47) - 1; // 0x00007fffffffffffUL; constexpr uptr max_vm = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# else // SANITIZER_WORDSIZE == 32 # else // SANITIZER_WORDSIZE == 32
static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize"); static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
return (1ULL << 32) - 1; // 0xffffffff; constexpr uptr max_vm = (1ULL << 32) - 1; // 0xffffffff;
# endif # endif
static_assert(max_vm <= SANITIZER_MMAP_RANGE_SIZE,
"Max virtual address must be less than mmap range size.");
return max_vm;
} }
#endif #endif
@ -1180,6 +1267,12 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
return shadow_start; return shadow_start;
} }
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
uptr num_aliases, uptr ring_buffer_size) {
CHECK(false && "HWASan aliasing is unimplemented on Mac");
return 0;
}
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found, uptr *largest_gap_found,
uptr *max_occupied_addr) { uptr *max_occupied_addr) {

View File

@ -14,26 +14,6 @@
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_platform.h" #include "sanitizer_platform.h"
/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
TARGET_OS_MAC (we have no support for iOS in any form for these versions,
so there's no ambiguity). */
#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
# define TARGET_OS_OSX 1
#endif
/* Other TARGET_OS_xxx are not present on earlier versions, define them to
0 (we have no support for them; they are not valid targets anyway). */
#ifndef TARGET_OS_IOS
#define TARGET_OS_IOS 0
#endif
#ifndef TARGET_OS_TV
#define TARGET_OS_TV 0
#endif
#ifndef TARGET_OS_WATCH
#define TARGET_OS_WATCH 0
#endif
#if SANITIZER_MAC #if SANITIZER_MAC
#include "sanitizer_posix.h" #include "sanitizer_posix.h"
@ -84,22 +64,5 @@ void RestrictMemoryToMaxAddress(uptr max_address);
} // namespace __sanitizer } // namespace __sanitizer
extern "C" {
static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
{};
static const char *__crashreporter_info__ __attribute__((__used__)) =
&__crashreporter_info_buff__[0];
asm(".desc ___crashreporter_info__, 0x10");
} // extern "C"
namespace __sanitizer {
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
inline void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(__crashreporter_info_buff__, msg,
sizeof(__crashreporter_info_buff__)); }
} // namespace __sanitizer
#endif // SANITIZER_MAC #endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H #endif // SANITIZER_MAC_H

View File

@ -120,11 +120,7 @@ INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) { INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
COMMON_MALLOC_ENTER(); COMMON_MALLOC_ENTER();
// Allocate |sizeof(COMMON_MALLOC_ZONE_NAME "-") + internal_strlen(name)| InternalScopedString new_name;
// bytes.
size_t buflen =
sizeof(COMMON_MALLOC_ZONE_NAME "-") + (name ? internal_strlen(name) : 0);
InternalScopedString new_name(buflen);
if (name && zone->introspect == sanitizer_zone.introspect) { if (name && zone->introspect == sanitizer_zone.introspect) {
new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name); new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
name = new_name.data(); name = new_name.data();

View File

@ -105,6 +105,12 @@ uptr internal_munmap(void *addr, uptr length) {
return _REAL(munmap, addr, length); return _REAL(munmap, addr, length);
} }
uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
void *new_address) {
CHECK(false && "internal_mremap is unimplemented on NetBSD");
return 0;
}
int internal_mprotect(void *addr, uptr length, int prot) { int internal_mprotect(void *addr, uptr length, int prot) {
DEFINE__REAL(int, mprotect, void *a, uptr b, int c); DEFINE__REAL(int, mprotect, void *a, uptr b, int c);
return _REAL(mprotect, addr, length, prot); return _REAL(mprotect, addr, length, prot);

View File

@ -19,12 +19,25 @@
# error "This operating system is not supported" # error "This operating system is not supported"
#endif #endif
// Get __GLIBC__ on a glibc platform. Exclude Android: features.h includes C
// function declarations into a .S file which doesn't compile.
// https://crbug.com/1162741
#if __has_include(<features.h>) && !defined(__ANDROID__)
#include <features.h>
#endif
#if defined(__linux__) #if defined(__linux__)
# define SANITIZER_LINUX 1 # define SANITIZER_LINUX 1
#else #else
# define SANITIZER_LINUX 0 # define SANITIZER_LINUX 0
#endif #endif
#if defined(__GLIBC__)
# define SANITIZER_GLIBC 1
#else
# define SANITIZER_GLIBC 0
#endif
#if defined(__FreeBSD__) #if defined(__FreeBSD__)
# define SANITIZER_FREEBSD 1 # define SANITIZER_FREEBSD 1
#else #else
@ -46,6 +59,11 @@
#if defined(__APPLE__) #if defined(__APPLE__)
# define SANITIZER_MAC 1 # define SANITIZER_MAC 1
# include <TargetConditionals.h> # include <TargetConditionals.h>
# if TARGET_OS_OSX
# define SANITIZER_OSX 1
# else
# define SANITIZER_OSX 0
# endif
# if TARGET_OS_IPHONE # if TARGET_OS_IPHONE
# define SANITIZER_IOS 1 # define SANITIZER_IOS 1
# else # else
@ -60,6 +78,7 @@
# define SANITIZER_MAC 0 # define SANITIZER_MAC 0
# define SANITIZER_IOS 0 # define SANITIZER_IOS 0
# define SANITIZER_IOSSIM 0 # define SANITIZER_IOSSIM 0
# define SANITIZER_OSX 0
#endif #endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH #if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
@ -247,8 +266,12 @@
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38) #define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__) #elif defined(__aarch64__)
# if SANITIZER_MAC # if SANITIZER_MAC
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM # if SANITIZER_OSX || SANITIZER_IOSSIM
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36) # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
# else
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
# endif
# else # else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48) # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif # endif

View File

@ -46,6 +46,12 @@
#define SI_LINUX_NOT_ANDROID 0 #define SI_LINUX_NOT_ANDROID 0
#endif #endif
#if SANITIZER_GLIBC
#define SI_GLIBC 1
#else
#define SI_GLIBC 0
#endif
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
#define SI_ANDROID 1 #define SI_ANDROID 1
#else #else
@ -159,7 +165,7 @@
SANITIZER_INTERCEPT_MEMCMP && \ SANITIZER_INTERCEPT_MEMCMP && \
((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_FREEBSD) ((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX #define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD #define SANITIZER_INTERCEPT___STRNDUP SI_GLIBC
#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ #if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070 __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
#define SI_MAC_DEPLOYMENT_BELOW_10_7 1 #define SI_MAC_DEPLOYMENT_BELOW_10_7 1
@ -183,8 +189,8 @@
#define SANITIZER_INTERCEPT_FPUTS SI_POSIX #define SANITIZER_INTERCEPT_FPUTS SI_POSIX
#define SANITIZER_INTERCEPT_PUTS SI_POSIX #define SANITIZER_INTERCEPT_PUTS SI_POSIX
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 #define SANITIZER_INTERCEPT_PREAD64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 #define SANITIZER_INTERCEPT_PWRITE64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_READV SI_POSIX #define SANITIZER_INTERCEPT_READV SI_POSIX
#define SANITIZER_INTERCEPT_WRITEV SI_POSIX #define SANITIZER_INTERCEPT_WRITEV SI_POSIX
@ -192,8 +198,8 @@
#define SANITIZER_INTERCEPT_PREADV \ #define SANITIZER_INTERCEPT_PREADV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PREADV64 SI_GLIBC
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITEV64 SI_GLIBC
#define SANITIZER_INTERCEPT_PRCTL SI_LINUX #define SANITIZER_INTERCEPT_PRCTL SI_LINUX
@ -201,16 +207,16 @@
#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX #define SANITIZER_INTERCEPT_STRPTIME SI_POSIX
#define SANITIZER_INTERCEPT_SCANF SI_POSIX #define SANITIZER_INTERCEPT_SCANF SI_POSIX
#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_ISOC99_SCANF SI_GLIBC
#ifndef SANITIZER_INTERCEPT_PRINTF #ifndef SANITIZER_INTERCEPT_PRINTF
#define SANITIZER_INTERCEPT_PRINTF SI_POSIX #define SANITIZER_INTERCEPT_PRINTF SI_POSIX
#define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD) #define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC
#endif #endif
#define SANITIZER_INTERCEPT___PRINTF_CHK \ #define SANITIZER_INTERCEPT___PRINTF_CHK \
(SANITIZER_INTERCEPT_PRINTF && SI_LINUX_NOT_ANDROID) (SANITIZER_INTERCEPT_PRINTF && SI_GLIBC)
#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA #define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX #define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
@ -220,13 +226,11 @@
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETPWENT \ #define SANITIZER_INTERCEPT_GETPWENT \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_FGETGRENT_R \ #define SANITIZER_INTERCEPT_FGETGRENT_R (SI_GLIBC || SI_SOLARIS)
(SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_GETPWENT_R \ #define SANITIZER_INTERCEPT_GETPWENT_R \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_FGETPWENT_R \ #define SANITIZER_INTERCEPT_FGETPWENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
(SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SETPWENT \ #define SANITIZER_INTERCEPT_SETPWENT \
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \ #define SANITIZER_INTERCEPT_CLOCK_GETTIME \
@ -234,8 +238,8 @@
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX #define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX #define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX #define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GLOB64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
#define SANITIZER_INTERCEPT_WAIT SI_POSIX #define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX #define SANITIZER_INTERCEPT_INET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX #define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
@ -250,8 +254,7 @@
(SI_FREEBSD || SI_LINUX_NOT_ANDROID) (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \ #define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \
(SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETHOSTENT_R \ #define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
(SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX #define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX #define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD) #define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
@ -296,8 +299,7 @@
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_REALPATH SI_POSIX #define SANITIZER_INTERCEPT_REALPATH SI_POSIX
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME \ #define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME (SI_GLIBC || SI_SOLARIS)
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CONFSTR \ #define SANITIZER_INTERCEPT_CONFSTR \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
@ -324,7 +326,7 @@
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX #define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX #define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
#define SANITIZER_INTERCEPT_BACKTRACE \ #define SANITIZER_INTERCEPT_BACKTRACE \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX #define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \ #define SANITIZER_INTERCEPT_STATFS \
@ -342,11 +344,11 @@
#define SANITIZER_INTERCEPT_SHMCTL \ #define SANITIZER_INTERCEPT_SHMCTL \
(((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \ (((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
SI_NETBSD || SI_SOLARIS) // NOLINT SI_NETBSD || SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \ #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \ #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD) (SI_POSIX && !SI_NETBSD)
@ -360,7 +362,7 @@
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \ #define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD) (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED (SI_POSIX && !SI_NETBSD) #define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \ #define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \
(SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
@ -368,7 +370,7 @@
(SI_LINUX_NOT_ANDROID && !SI_NETBSD) (SI_LINUX_NOT_ANDROID && !SI_NETBSD)
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD #define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX #define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTSNAME SI_LINUX #define SANITIZER_INTERCEPT_PTSNAME SI_LINUX
#define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX #define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX #define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
@ -381,7 +383,7 @@
#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD) #define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS) #define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_DRAND48_R SI_GLIBC
#define SANITIZER_INTERCEPT_RAND_R \ #define SANITIZER_INTERCEPT_RAND_R \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_ICONV \ #define SANITIZER_INTERCEPT_ICONV \
@ -396,12 +398,12 @@
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS) (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX #define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD #define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \ #define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \ #define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \ #define SANITIZER_INTERCEPT_TLS_GET_ADDR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
@ -419,19 +421,19 @@
#else #else
#define SANITIZER_INTERCEPT_AEABI_MEM 0 #define SANITIZER_INTERCEPT_AEABI_MEM 0
#endif #endif
#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_GLIBC
#define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX) #define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX)
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_XDR (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_XDRREC SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_XDRREC SI_GLIBC
#define SANITIZER_INTERCEPT_TSEARCH \ #define SANITIZER_INTERCEPT_TSEARCH \
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_SOLARIS) (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_GLIBC
#define SANITIZER_INTERCEPT_FOPEN SI_POSIX #define SANITIZER_INTERCEPT_FOPEN SI_POSIX
#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 #define SANITIZER_INTERCEPT_FOPEN64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \ #define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \
(SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_SOLARIS) (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_OBSTACK SI_GLIBC
#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX #define SANITIZER_INTERCEPT_FFLUSH SI_POSIX
#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX #define SANITIZER_INTERCEPT_FCLOSE SI_POSIX
@ -456,7 +458,7 @@
#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS) #define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPTOR_HOOKS \ #define SANITIZER_INTERCEPTOR_HOOKS \
(SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD) (SI_LINUX || SI_MAC || SI_WINDOWS || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX #define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX #define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX #define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
@ -479,20 +481,12 @@
#define SANITIZER_INTERCEPT_MMAP SI_POSIX #define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \ #define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && SI_NOT_RTEMS && \
!SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_MEMALIGN \ #define SANITIZER_INTERCEPT_MEMALIGN \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS) (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN \ #define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS && \ #define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
!SI_ANDROID) // NOLINT #define SANITIZER_INTERCEPT_CFREE (SI_GLIBC && !SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PVALLOC \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && SI_NOT_RTEMS && \
!SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_CFREE \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA && SI_NOT_RTEMS && \
!SI_SOLARIS && !SANITIZER_ANDROID) // NOLINT
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX #define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS) #define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD) #define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
@ -532,7 +526,7 @@
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD) #define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD #define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX) #define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
#define SANITIZER_INTERCEPT_NETENT SI_NETBSD #define SANITIZER_INTERCEPT_NETENT SI_NETBSD
#define SANITIZER_INTERCEPT_SETVBUF \ #define SANITIZER_INTERCEPT_SETVBUF \
(SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC) (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
@ -583,7 +577,7 @@
#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD #define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
#define SANITIZER_INTERCEPT_QSORT \ #define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID) (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID) #define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
// sigaltstack on i386 macOS cannot be intercepted due to setjmp() // sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers. // calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \ #define SANITIZER_INTERCEPT_SIGALTSTACK \
@ -591,4 +585,25 @@
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD) #define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD #define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have
// to do two things:
// 1. Build compiler-rt with -DSANITIZER_OVERRIDE_INTERCEPTORS.
// 2. Provide a header file named sanitizer_intercept_overriders.h in the
// include path for their compiler-rt build.
// An example of an overrider for strlen interceptor that one can list in
// sanitizer_intercept_overriders.h is as follows:
//
// #ifdef SANITIZER_INTERCEPT_STRLEN
// #undef SANITIZER_INTERCEPT_STRLEN
// #define SANITIZER_INTERCEPT_STRLEN <value of choice>
// #endif
//
// This "feature" is useful for downstream users who do not want some of
// their libc funtions to be intercepted. They can selectively disable
// interception of those functions.
#ifdef SANITIZER_OVERRIDE_INTERCEPTORS
#include <sanitizer_intercept_overriders.h>
#endif
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H #endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H

View File

@ -35,7 +35,10 @@
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/statvfs.h> #include <sys/statvfs.h>
#include <sys/time.h> #include <sys/time.h>
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-W#warnings"
#include <sys/timeb.h> #include <sys/timeb.h>
#pragma clang diagnostic pop
#include <sys/times.h> #include <sys/times.h>
#include <sys/timespec.h> #include <sys/timespec.h>
#include <sys/types.h> #include <sys/types.h>

View File

@ -26,12 +26,9 @@
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work // are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat // fine with newer headers, too.
// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
// Also, for some platforms (e.g. mips) there are additional members in the
// <sys/stat.h> struct stat:s.
#include <linux/posix_types.h> #include <linux/posix_types.h>
#if defined(__x86_64__) #if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h> #include <sys/stat.h>
#else #else
#define ino_t __kernel_ino_t #define ino_t __kernel_ino_t

View File

@ -11,18 +11,19 @@
// Sizes and layouts of platform-specific POSIX data structures. // Sizes and layouts of platform-specific POSIX data structures.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_platform.h" #if defined(__linux__) || defined(__APPLE__)
#if SANITIZER_LINUX || SANITIZER_MAC
// Tests in this file assume that off_t-dependent data structures match the // Tests in this file assume that off_t-dependent data structures match the
// libc ABI. For example, struct dirent here is what readdir() function (as // libc ABI. For example, struct dirent here is what readdir() function (as
// exported from libc) returns, and not the user-facing "dirent", which // exported from libc) returns, and not the user-facing "dirent", which
// depends on _FILE_OFFSET_BITS setting. // depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below. // To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS #undef _FILE_OFFSET_BITS
#endif #endif
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC
// Must go after undef _FILE_OFFSET_BITS. // Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h" #include "sanitizer_glibc_version.h"
@ -37,6 +38,7 @@
#include <pwd.h> #include <pwd.h>
#include <signal.h> #include <signal.h>
#include <stddef.h> #include <stddef.h>
#include <stdio.h>
#include <sys/mman.h> #include <sys/mman.h>
#include <sys/resource.h> #include <sys/resource.h>
#include <sys/socket.h> #include <sys/socket.h>
@ -58,7 +60,6 @@
#endif #endif
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
#include <fstab.h>
#include <sys/mount.h> #include <sys/mount.h>
#include <sys/timeb.h> #include <sys/timeb.h>
#include <utmpx.h> #include <utmpx.h>
@ -110,20 +111,31 @@ typedef struct user_fpregs elf_fpregset_t;
#include <wordexp.h> #include <wordexp.h>
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_LINUX
#include <glob.h> #if SANITIZER_GLIBC
#include <obstack.h> #include <fstab.h>
#include <mqueue.h>
#include <net/if_ppp.h> #include <net/if_ppp.h>
#include <netax25/ax25.h> #include <netax25/ax25.h>
#include <netipx/ipx.h> #include <netipx/ipx.h>
#include <netrom/netrom.h> #include <netrom/netrom.h>
#include <obstack.h>
#if HAVE_RPC_XDR_H #if HAVE_RPC_XDR_H
# include <rpc/xdr.h> # include <rpc/xdr.h>
#endif #endif
#include <scsi/scsi.h> #include <scsi/scsi.h>
#include <sys/mtio.h> #else
#include <linux/if_ppp.h>
#include <linux/kd.h>
#include <linux/ppp_defs.h>
#endif // SANITIZER_GLIBC
#if SANITIZER_ANDROID
#include <linux/mtio.h>
#else
#include <glob.h>
#include <mqueue.h>
#include <sys/kd.h> #include <sys/kd.h>
#include <sys/mtio.h>
#include <sys/shm.h> #include <sys/shm.h>
#include <sys/statvfs.h> #include <sys/statvfs.h>
#include <sys/timex.h> #include <sys/timex.h>
@ -142,20 +154,14 @@ typedef struct user_fpregs elf_fpregset_t;
#include <sys/msg.h> #include <sys/msg.h>
#include <sys/ipc.h> #include <sys/ipc.h>
#include <crypt.h> #include <crypt.h>
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID #endif // SANITIZER_ANDROID
#if SANITIZER_ANDROID
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#endif
#if SANITIZER_LINUX
#include <link.h> #include <link.h>
#include <sys/vfs.h> #include <sys/vfs.h>
#include <sys/epoll.h> #include <sys/epoll.h>
#include <linux/capability.h> #include <linux/capability.h>
#else
#include <fstab.h>
#endif // SANITIZER_LINUX #endif // SANITIZER_LINUX
#if SANITIZER_MAC #if SANITIZER_MAC
@ -202,8 +208,11 @@ namespace __sanitizer {
unsigned struct_statfs64_sz = sizeof(struct statfs64); unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS #endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
#if !SANITIZER_ANDROID #if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
unsigned struct_fstab_sz = sizeof(struct fstab); unsigned struct_fstab_sz = sizeof(struct fstab);
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_MAC
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs); unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr); unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t); unsigned ucontext_t_sz = sizeof(ucontext_t);
@ -299,7 +308,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_GLIBC
int glob_nomatch = GLOB_NOMATCH; int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC; int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif #endif
@ -422,7 +431,9 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_input_id_sz = sizeof(struct input_id); unsigned struct_input_id_sz = sizeof(struct input_id);
unsigned struct_mtpos_sz = sizeof(struct mtpos); unsigned struct_mtpos_sz = sizeof(struct mtpos);
unsigned struct_rtentry_sz = sizeof(struct rtentry); unsigned struct_rtentry_sz = sizeof(struct rtentry);
#if SANITIZER_GLIBC || SANITIZER_ANDROID
unsigned struct_termio_sz = sizeof(struct termio); unsigned struct_termio_sz = sizeof(struct termio);
#endif
unsigned struct_vt_consize_sz = sizeof(struct vt_consize); unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes); unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
unsigned struct_vt_stat_sz = sizeof(struct vt_stat); unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
@ -447,7 +458,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_vt_mode_sz = sizeof(struct vt_mode); unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
#endif // SANITIZER_LINUX #endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_GLIBC
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct); unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor); unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000) #if EV_VERSION > (0x010000)
@ -470,12 +481,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25); unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc); unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
unsigned struct_unimapinit_sz = sizeof(struct unimapinit); unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info); unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats); unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID #endif // SANITIZER_GLIBC
#if !SANITIZER_ANDROID && !SANITIZER_MAC #if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
@ -881,6 +890,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP; unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR; unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP; unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
#if SANITIZER_GLIBC
unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN; unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN;
unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST; unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST;
unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE; unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE;
@ -899,6 +909,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS; unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL; unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS; unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
#endif
unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL; unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI; unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI; unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
@ -969,7 +980,7 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD #endif // SANITIZER_LINUX || SANITIZER_FREEBSD
#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID #if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(glob_t); CHECK_TYPE_SIZE(glob_t);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc); CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv); CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
@ -980,7 +991,7 @@ CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir); CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat); CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
CHECK_SIZE_AND_OFFSET(glob_t, gl_stat); CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
#endif #endif // SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(addrinfo); CHECK_TYPE_SIZE(addrinfo);
CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags); CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
@ -1003,17 +1014,27 @@ CHECK_TYPE_SIZE(iovec);
CHECK_SIZE_AND_OFFSET(iovec, iov_base); CHECK_SIZE_AND_OFFSET(iovec, iov_base);
CHECK_SIZE_AND_OFFSET(iovec, iov_len); CHECK_SIZE_AND_OFFSET(iovec, iov_len);
// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
// many implementations don't conform to the standard. Since we pick the
// non-conforming glibc definition, exclude the checks for musl (incompatible
// sizes but compatible offsets).
CHECK_TYPE_SIZE(msghdr); CHECK_TYPE_SIZE(msghdr);
CHECK_SIZE_AND_OFFSET(msghdr, msg_name); CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen); CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
CHECK_SIZE_AND_OFFSET(msghdr, msg_iov); CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen); CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
#endif
CHECK_SIZE_AND_OFFSET(msghdr, msg_control); CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen); CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
#endif
CHECK_SIZE_AND_OFFSET(msghdr, msg_flags); CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
CHECK_TYPE_SIZE(cmsghdr); CHECK_TYPE_SIZE(cmsghdr);
#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
#endif
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
@ -1121,7 +1142,7 @@ CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
CHECK_TYPE_SIZE(ether_addr); CHECK_TYPE_SIZE(ether_addr);
#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID #if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(ipc_perm); CHECK_TYPE_SIZE(ipc_perm);
# if SANITIZER_FREEBSD # if SANITIZER_FREEBSD
CHECK_SIZE_AND_OFFSET(ipc_perm, key); CHECK_SIZE_AND_OFFSET(ipc_perm, key);
@ -1183,7 +1204,7 @@ CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data); CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
#endif #endif
#if SANITIZER_LINUX #if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo)); COMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo));
#endif #endif
@ -1233,7 +1254,7 @@ COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE); COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
#endif #endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID #if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE)); COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
CHECK_SIZE_AND_OFFSET(FILE, _flags); CHECK_SIZE_AND_OFFSET(FILE, _flags);
CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr); CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
@ -1250,9 +1271,7 @@ CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
CHECK_SIZE_AND_OFFSET(FILE, _markers); CHECK_SIZE_AND_OFFSET(FILE, _markers);
CHECK_SIZE_AND_OFFSET(FILE, _chain); CHECK_SIZE_AND_OFFSET(FILE, _chain);
CHECK_SIZE_AND_OFFSET(FILE, _fileno); CHECK_SIZE_AND_OFFSET(FILE, _fileno);
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk)); COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit); CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev); CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
@ -1267,7 +1286,7 @@ CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close); CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
#endif #endif // SANITIZER_GLIBC
#if SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(sem_t); CHECK_TYPE_SIZE(sem_t);

View File

@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__) #elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128) ? FIRST_32_SECOND_64(104, 128)
: FIRST_32_SECOND_64(144, 216); : FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104; const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__) #elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat_sz = 64;
@ -443,6 +443,8 @@ struct __sanitizer_cmsghdr {
int cmsg_type; int cmsg_type;
}; };
#else #else
// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
// many implementations don't conform to the standard.
struct __sanitizer_msghdr { struct __sanitizer_msghdr {
void *msg_name; void *msg_name;
unsigned msg_namelen; unsigned msg_namelen;

View File

@ -275,8 +275,8 @@ void ReportFile::Write(const char *buffer, uptr length) {
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) { bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/false); MemoryMappingLayout proc_maps(/*cache_enabled*/false);
InternalScopedString buff(kMaxPathLength); InternalMmapVector<char> buff(kMaxPathLength);
MemoryMappedSegment segment(buff.data(), kMaxPathLength); MemoryMappedSegment segment(buff.data(), buff.size());
while (proc_maps.Next(&segment)) { while (proc_maps.Next(&segment)) {
if (segment.IsExecutable() && if (segment.IsExecutable() &&
internal_strcmp(module, segment.filename) == 0) { internal_strcmp(module, segment.filename) == 0) {

View File

@ -40,6 +40,10 @@ uptr internal_write(fd_t fd, const void *buf, uptr count);
uptr internal_mmap(void *addr, uptr length, int prot, int flags, uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset); int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length); uptr internal_munmap(void *addr, uptr length);
#if SANITIZER_LINUX
uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
void *new_address);
#endif
int internal_mprotect(void *addr, uptr length, int prot); int internal_mprotect(void *addr, uptr length, int prot);
int internal_madvise(uptr addr, uptr length, int advice); int internal_madvise(uptr addr, uptr length, int advice);

View File

@ -143,7 +143,7 @@ void Abort() {
if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) { if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
struct sigaction sigact; struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact)); internal_memset(&sigact, 0, sizeof(sigact));
sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL; sigact.sa_handler = SIG_DFL;
internal_sigaction(SIGABRT, &sigact, nullptr); internal_sigaction(SIGABRT, &sigact, nullptr);
} }
#endif #endif

View File

@ -249,26 +249,21 @@ static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
va_list args) { va_list args) {
va_list args2; va_list args2;
va_copy(args2, args); va_copy(args2, args);
const int kLen = 16 * 1024; InternalMmapVector<char> v;
int needed_length; int needed_length = 0;
char *buffer = local_buffer; char *buffer = local_buffer;
// First try to print a message using a local buffer, and then fall back to // First try to print a message using a local buffer, and then fall back to
// mmaped buffer. // mmaped buffer.
for (int use_mmap = 0; use_mmap < 2; use_mmap++) { for (int use_mmap = 0;; use_mmap++) {
if (use_mmap) { if (use_mmap) {
va_end(args); va_end(args);
va_copy(args, args2); va_copy(args, args2);
buffer = (char*)MmapOrDie(kLen, "Report"); v.resize(needed_length + 1);
buffer_size = kLen; buffer_size = v.capacity();
v.resize(buffer_size);
buffer = &v[0];
} }
needed_length = 0; needed_length = 0;
// Check that data fits into the current buffer.
# define CHECK_NEEDED_LENGTH \
if (needed_length >= buffer_size) { \
if (!use_mmap) continue; \
RAW_CHECK_MSG(needed_length < kLen, \
"Buffer in Report is too short!\n"); \
}
// Fuchsia's logging infrastructure always keeps track of the logging // Fuchsia's logging infrastructure always keeps track of the logging
// process, thread, and timestamp, so never prepend such information. // process, thread, and timestamp, so never prepend such information.
if (!SANITIZER_FUCHSIA && append_pid) { if (!SANITIZER_FUCHSIA && append_pid) {
@ -277,18 +272,20 @@ static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
if (common_flags()->log_exe_name && exe_name) { if (common_flags()->log_exe_name && exe_name) {
needed_length += internal_snprintf(buffer, buffer_size, needed_length += internal_snprintf(buffer, buffer_size,
"==%s", exe_name); "==%s", exe_name);
CHECK_NEEDED_LENGTH if (needed_length >= buffer_size)
continue;
} }
needed_length += internal_snprintf( needed_length += internal_snprintf(
buffer + needed_length, buffer_size - needed_length, "==%d==", pid); buffer + needed_length, buffer_size - needed_length, "==%d==", pid);
CHECK_NEEDED_LENGTH if (needed_length >= buffer_size)
continue;
} }
needed_length += VSNPrintf(buffer + needed_length, needed_length += VSNPrintf(buffer + needed_length,
buffer_size - needed_length, format, args); buffer_size - needed_length, format, args);
CHECK_NEEDED_LENGTH if (needed_length >= buffer_size)
continue;
// If the message fit into the buffer, print it and exit. // If the message fit into the buffer, print it and exit.
break; break;
# undef CHECK_NEEDED_LENGTH
} }
RawWrite(buffer); RawWrite(buffer);
@ -297,9 +294,6 @@ static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
CallPrintfAndReportCallback(buffer); CallPrintfAndReportCallback(buffer);
LogMessageOnPrintf(buffer); LogMessageOnPrintf(buffer);
// If we had mapped any memory, clean up.
if (buffer != local_buffer)
UnmapOrDie((void *)buffer, buffer_size);
va_end(args2); va_end(args2);
} }
@ -346,13 +340,24 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
FORMAT(2, 3) FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) { void InternalScopedString::append(const char *format, ...) {
CHECK_LT(length_, size()); uptr prev_len = length();
va_list args;
va_start(args, format); while (true) {
VSNPrintf(data() + length_, size() - length_, format, args); buffer_.resize(buffer_.capacity());
va_end(args);
length_ += internal_strlen(data() + length_); va_list args;
CHECK_LT(length_, size()); va_start(args, format);
uptr sz = VSNPrintf(buffer_.data() + prev_len, buffer_.size() - prev_len,
format, args);
va_end(args);
if (sz < buffer_.size() - prev_len) {
buffer_.resize(prev_len + sz + 1);
break;
}
buffer_.reserve(buffer_.capacity() * 2);
}
CHECK_EQ(buffer_[length()], '\0');
} }
} // namespace __sanitizer } // namespace __sanitizer

Some files were not shown because too many files have changed in this diff Show More