libsanitizer: add hwasan.

Introduce the libhwasan library from LLVM sources.
Martin Liska 2020-11-13 17:06:48 +01:00 committed by Matthew Malcomson
parent b13dacdfb3
commit 1ee3d1ef10
34 changed files with 4575 additions and 1 deletion

@@ -1,4 +1,4 @@
-a28a466210199559d38251c11f30515cc83eadd6
+6e7dd1e3e1170080b76b5dcc5716bdd974343233
The first line of this file holds the git revision number of the
last merge done from the master library sources.

@@ -11,6 +11,7 @@ https://github.com/llvm/llvm-project in the following directories:
compiler-rt/lib/tsan
compiler-rt/lib/lsan
compiler-rt/lib/ubsan
+compiler-rt/lib/hwasan
Trivial and urgent fixes (portability, build fixes, etc.) may go directly to the
GCC tree. All non-trivial changes, functionality improvements, etc. should go

@@ -0,0 +1,522 @@
//===-- hwasan.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer runtime.
//===----------------------------------------------------------------------===//
#include "hwasan.h"
#include "hwasan_checks.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_globals.h"
#include "hwasan_poisoning.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_init.h"
// ATTENTION! No system header includes in this file.
using namespace __sanitizer;
namespace __hwasan {
static Flags hwasan_flags;
Flags *flags() {
return &hwasan_flags;
}
int hwasan_inited = 0;
int hwasan_instrumentation_inited = 0;
bool hwasan_init_is_running;
int hwasan_report_count = 0;
void Flags::SetDefaults() {
#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "hwasan_flags.inc"
#undef HWASAN_FLAG
}
static void RegisterHwasanFlags(FlagParser *parser, Flags *f) {
#define HWASAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "hwasan_flags.inc"
#undef HWASAN_FLAG
}
static void InitializeFlags() {
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("HWASAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 20;
cf.handle_ioctl = true;
// FIXME: test and enable.
cf.check_printf = false;
cf.intercept_tls_get_addr = true;
cf.exitcode = 99;
// 8 shadow pages ~512kB, small enough to cover common stack sizes.
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
// Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
#if SANITIZER_ANDROID
// Let the platform handle other signals. It is better at reporting them
// than we are.
cf.handle_segv = kHandleSignalNo;
cf.handle_sigbus = kHandleSignalNo;
cf.handle_abort = kHandleSignalNo;
cf.handle_sigill = kHandleSignalNo;
cf.handle_sigfpe = kHandleSignalNo;
#endif
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser parser;
RegisterHwasanFlags(&parser, f);
RegisterCommonFlags(&parser);
#if HWASAN_CONTAINS_UBSAN
__ubsan::Flags *uf = __ubsan::flags();
uf->SetDefaults();
FlagParser ubsan_parser;
__ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
RegisterCommonFlags(&ubsan_parser);
#endif
// Override from user-specified string.
if (__hwasan_default_options)
parser.ParseString(__hwasan_default_options());
#if HWASAN_CONTAINS_UBSAN
const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
parser.ParseStringFromEnv("HWASAN_OPTIONS");
#if HWASAN_CONTAINS_UBSAN
ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
#endif
InitializeCommonFlags();
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
}
static void HWAsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
line, cond, (uptr)v1, (uptr)v2);
PRINT_CURRENT_STACK_CHECK();
Die();
}
static constexpr uptr kMemoryUsageBufferSize = 4096;
static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats();
auto *sds = StackDepotGetStats();
AllocatorStatCounters asc;
GetAllocatorStats(asc);
s.append(
"HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
" thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
" heap: %zd",
internal_getpid(), GetRSS(), thread_stats.n_live_threads,
thread_stats.total_stack_size,
thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]);
}
#if SANITIZER_ANDROID
static char *memory_usage_buffer = nullptr;
static void InitMemoryUsage() {
memory_usage_buffer =
(char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string");
CHECK(memory_usage_buffer);
memory_usage_buffer[0] = '\0';
DecorateMapping((uptr)memory_usage_buffer, kMemoryUsageBufferSize,
memory_usage_buffer);
}
void UpdateMemoryUsage() {
if (!flags()->export_memory_stats)
return;
if (!memory_usage_buffer)
InitMemoryUsage();
InternalScopedString s(kMemoryUsageBufferSize);
HwasanFormatMemoryUsage(s);
internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
}
#else
void UpdateMemoryUsage() {}
#endif
} // namespace __hwasan
using namespace __hwasan;
void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
Thread *t = GetCurrentThread();
if (!t) {
// The thread is still being created, or has already been destroyed.
size = 0;
return;
}
Unwind(max_depth, pc, bp, context, t->stack_top(), t->stack_bottom(),
request_fast);
}
static bool InitializeSingleGlobal(const hwasan_global &global) {
uptr full_granule_size = RoundDownTo(global.size(), 16);
TagMemoryAligned(global.addr(), full_granule_size, global.tag());
if (global.size() % 16)
TagMemoryAligned(global.addr() + full_granule_size, 16, global.size() % 16);
return false;
}
static void InitLoadedGlobals() {
dl_iterate_phdr(
[](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
for (const hwasan_global &global : HwasanGlobalsFor(
info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
InitializeSingleGlobal(global);
return 0;
},
nullptr);
}
// Prepare to run instrumented code on the main thread.
static void InitInstrumentation() {
if (hwasan_instrumentation_inited) return;
InitPrctl();
if (!InitShadow()) {
Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
DumpProcessMap();
Die();
}
InitThreads();
hwasanThreadList().CreateCurrentThread();
hwasan_instrumentation_inited = 1;
}
// Interface.
uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol.
// This function was used by the old frame descriptor mechanism. We keep it
// around to avoid breaking ABI.
void __hwasan_init_frames(uptr beg, uptr end) {}
void __hwasan_init_static() {
InitShadowGOT();
InitInstrumentation();
// In the non-static code path we call dl_iterate_phdr here. But at this point
// libc might not have been initialized enough for dl_iterate_phdr to work.
// Fortunately, since this is a statically linked executable we can use the
// linker-defined symbol __ehdr_start to find the only relevant set of phdrs.
extern ElfW(Ehdr) __ehdr_start;
for (const hwasan_global &global : HwasanGlobalsFor(
/* base */ 0,
reinterpret_cast<const ElfW(Phdr) *>(
reinterpret_cast<const char *>(&__ehdr_start) +
__ehdr_start.e_phoff),
__ehdr_start.e_phnum))
InitializeSingleGlobal(global);
}
void __hwasan_init() {
CHECK(!hwasan_init_is_running);
if (hwasan_inited) return;
hwasan_init_is_running = 1;
SanitizerToolName = "HWAddressSanitizer";
InitTlsSize();
CacheBinaryName();
InitializeFlags();
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(HWAsanCheckFailed);
__sanitizer_set_report_path(common_flags()->log_path);
AndroidTestTlsSlot();
DisableCoreDumperIfNecessary();
InitInstrumentation();
InitLoadedGlobals();
// Needs to be called here because flags()->random_tags might not have been
// initialized when InitInstrumentation() was called.
GetCurrentThread()->InitRandomState();
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
// This may call libc -> needs initialized shadow.
AndroidLogInit();
InitializeInterceptors();
InstallDeadlySignalHandlers(HwasanOnDeadlySignal);
InstallAtExitHandler(); // Needs __cxa_atexit interceptor.
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
HwasanTSDInit();
HwasanTSDThreadInit();
HwasanAllocatorInit();
#if HWASAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
VPrintf(1, "HWAddressSanitizer init done\n");
hwasan_init_is_running = 0;
hwasan_inited = 1;
}
void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
ElfW(Half) phnum) {
for (const hwasan_global &global : HwasanGlobalsFor(base, phdr, phnum))
InitializeSingleGlobal(global);
}
void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
ElfW(Half) phnum) {
for (; phnum != 0; ++phdr, --phnum)
if (phdr->p_type == PT_LOAD)
TagMemory(base + phdr->p_vaddr, phdr->p_memsz, 0);
}
void __hwasan_print_shadow(const void *p, uptr sz) {
uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
uptr shadow_first = MemToShadow(ptr_raw);
uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw,
ptr_raw + sz, GetTagFromPointer((uptr)p));
for (uptr s = shadow_first; s <= shadow_last; ++s)
Printf(" %zx: %x\n", ShadowToMem(s), *(tag_t *)s);
}
sptr __hwasan_test_shadow(const void *p, uptr sz) {
if (sz == 0)
return -1;
tag_t ptr_tag = GetTagFromPointer((uptr)p);
uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
uptr shadow_first = MemToShadow(ptr_raw);
uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
for (uptr s = shadow_first; s <= shadow_last; ++s)
if (*(tag_t *)s != ptr_tag) {
sptr offset = ShadowToMem(s) - ptr_raw;
return offset < 0 ? 0 : offset;
}
return -1;
}
u16 __sanitizer_unaligned_load16(const uu16 *p) {
return *p;
}
u32 __sanitizer_unaligned_load32(const uu32 *p) {
return *p;
}
u64 __sanitizer_unaligned_load64(const uu64 *p) {
return *p;
}
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
*p = x;
}
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
*p = x;
}
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
*p = x;
}
void __hwasan_loadN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
}
void __hwasan_load1(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
}
void __hwasan_load2(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
}
void __hwasan_load4(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
}
void __hwasan_load8(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
}
void __hwasan_load16(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
}
void __hwasan_loadN_noabort(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
}
void __hwasan_load1_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
}
void __hwasan_load2_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
}
void __hwasan_load4_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
}
void __hwasan_load8_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
}
void __hwasan_load16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
}
void __hwasan_storeN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
}
void __hwasan_store1(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
}
void __hwasan_store2(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
}
void __hwasan_store4(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
}
void __hwasan_store8(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
}
void __hwasan_store16(uptr p) {
CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
}
void __hwasan_storeN_noabort(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
}
void __hwasan_store1_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
}
void __hwasan_store2_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
}
void __hwasan_store4_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
}
void __hwasan_store8_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
}
void __hwasan_store16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
}
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
TagMemoryAligned(p, sz, tag);
}
uptr __hwasan_tag_pointer(uptr p, u8 tag) {
return AddTagToPointer(p, tag);
}
void __hwasan_handle_longjmp(const void *sp_dst) {
uptr dst = (uptr)sp_dst;
// HWASan does not support tagged SP.
CHECK(GetTagFromPointer(dst) == 0);
uptr sp = (uptr)__builtin_frame_address(0);
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
if (dst < sp || dst - sp > kMaxExpectedCleanupSize) {
Report(
"WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
"stack top: %p; target %p; distance: %p (%zd)\n"
"False positive error reports may follow\n",
(void *)sp, (void *)dst, dst - sp);
return;
}
TagMemory(sp, dst - sp, 0);
}
void __hwasan_handle_vfork(const void *sp_dst) {
uptr sp = (uptr)sp_dst;
Thread *t = GetCurrentThread();
CHECK(t);
uptr top = t->stack_top();
uptr bottom = t->stack_bottom();
if (top == 0 || bottom == 0 || sp < bottom || sp >= top) {
Report(
"WARNING: HWASan is ignoring requested __hwasan_handle_vfork: "
"stack top: %zx; current %zx; bottom: %zx \n"
"False positive error reports may follow\n",
top, sp, bottom);
return;
}
TagMemory(bottom, sp - bottom, 0);
}
extern "C" void *__hwasan_extra_spill_area() {
Thread *t = GetCurrentThread();
return &t->vfork_spill();
}
void __hwasan_print_memory_usage() {
InternalScopedString s(kMemoryUsageBufferSize);
HwasanFormatMemoryUsage(s);
Printf("%s\n", s.data());
}
static const u8 kFallbackTag = 0xBB;
u8 __hwasan_generate_tag() {
Thread *t = GetCurrentThread();
if (!t) return kFallbackTag;
return t->GenerateRandomTag();
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __hwasan_default_options() { return ""; }
} // extern "C"
#endif
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
stack.Print();
}
} // extern "C"
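
A usage note on __hwasan_test_shadow above: it returns -1 when every granule
covering [p, p+sz) carries the pointer's tag, and otherwise the offset of the
first mismatching byte. A minimal sketch of probing it from an instrumented
program (illustrative only: it declares the entry point by hand with the
signature used above and assumes a HWASan-enabled build on AArch64):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

extern "C" intptr_t __hwasan_test_shadow(const void *p, uintptr_t sz);

int main() {
  // 32 is a multiple of the 16-byte granule, so no short granule is involved.
  char *p = (char *)malloc(32);
  // Every granule in [p, p+32) carries the pointer's tag: expect -1.
  printf("in bounds: %ld\n", (long)__hwasan_test_shadow(p, 32));
  // One byte past the end lands in a granule that almost certainly carries a
  // different tag, so the offset of the first mismatch (32) is reported.
  printf("past end:  %ld\n", (long)__hwasan_test_shadow(p, 33));
  free(p);
  return 0;
}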

@@ -0,0 +1,165 @@
//===-- hwasan.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Private Hwasan header.
//===----------------------------------------------------------------------===//
#ifndef HWASAN_H
#define HWASAN_H
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "hwasan_interface_internal.h"
#include "hwasan_flags.h"
#include "ubsan/ubsan_platform.h"
#ifndef HWASAN_CONTAINS_UBSAN
# define HWASAN_CONTAINS_UBSAN CAN_SANITIZE_UB
#endif
#ifndef HWASAN_WITH_INTERCEPTORS
#define HWASAN_WITH_INTERCEPTORS 0
#endif
#ifndef HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
#define HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE HWASAN_WITH_INTERCEPTORS
#endif
typedef u8 tag_t;
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag.
const unsigned kAddressTagShift = 56;
const uptr kAddressTagMask = 0xFFUL << kAddressTagShift;
// Minimal alignment of the shadow base address. Determines the space available
// for threads and stack histories. This is an ABI constant.
const unsigned kShadowBaseAlignment = 32;
const unsigned kRecordAddrBaseTagShift = 3;
const unsigned kRecordFPShift = 48;
const unsigned kRecordFPLShift = 4;
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
static inline tag_t GetTagFromPointer(uptr p) {
return p >> kAddressTagShift;
}
static inline uptr UntagAddr(uptr tagged_addr) {
return tagged_addr & ~kAddressTagMask;
}
static inline void *UntagPtr(const void *tagged_ptr) {
return reinterpret_cast<void *>(
UntagAddr(reinterpret_cast<uptr>(tagged_ptr)));
}
static inline uptr AddTagToPointer(uptr p, tag_t tag) {
return (p & ~kAddressTagMask) | ((uptr)tag << kAddressTagShift);
}
namespace __hwasan {
extern int hwasan_inited;
extern bool hwasan_init_is_running;
extern int hwasan_report_count;
bool InitShadow();
void InitPrctl();
void InitThreads();
void InitializeInterceptors();
void HwasanAllocatorInit();
void *hwasan_malloc(uptr size, StackTrace *stack);
void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack);
void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack);
void *hwasan_valloc(uptr size, StackTrace *stack);
void *hwasan_pvalloc(uptr size, StackTrace *stack);
void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack);
int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack);
void hwasan_free(void *ptr, StackTrace *stack);
void InstallAtExitHandler();
#define GET_MALLOC_STACK_TRACE \
BufferedStackTrace stack; \
if (hwasan_inited) \
stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
nullptr, common_flags()->fast_unwind_on_malloc, \
common_flags()->malloc_context_size)
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
if (hwasan_inited) \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
#define GET_FATAL_STACK_TRACE_HERE \
GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
#define PRINT_CURRENT_STACK_CHECK() \
{ \
GET_FATAL_STACK_TRACE_HERE; \
stack.Print(); \
}
void HwasanTSDInit();
void HwasanTSDThreadInit();
void HwasanOnDeadlySignal(int signo, void *info, void *context);
void UpdateMemoryUsage();
void AppendToErrorMessageBuffer(const char *buffer);
void AndroidTestTlsSlot();
} // namespace __hwasan
#define HWASAN_MALLOC_HOOK(ptr, size) \
do { \
if (&__sanitizer_malloc_hook) { \
__sanitizer_malloc_hook(ptr, size); \
} \
RunMallocHooks(ptr, size); \
} while (false)
#define HWASAN_FREE_HOOK(ptr) \
do { \
if (&__sanitizer_free_hook) { \
__sanitizer_free_hook(ptr); \
} \
RunFreeHooks(ptr); \
} while (false)
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
// For both bionic and glibc __sigset_t is an unsigned long.
typedef unsigned long __hw_sigset_t;
// Setjmp and longjmp implementations are platform specific, and hence the
// interception code is platform specific too. As yet we've only implemented
// the interception for AArch64.
typedef unsigned long long __hw_register_buf[22];
struct __hw_jmp_buf_struct {
// NOTE: The machine-dependent definition of `__sigsetjmp'
// assumes that a `__hw_jmp_buf' begins with a `__hw_register_buf' and that
// `__mask_was_saved' follows it. Do not move these members or add others
// before it.
__hw_register_buf __jmpbuf; // Calling environment.
int __mask_was_saved; // Saved the signal mask?
__hw_sigset_t __saved_mask; // Saved signal mask.
};
typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
#endif // HWASAN_H
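
The tag helpers above are plain bit arithmetic on the top (TBI) byte. A
standalone sketch of the same layout, mirroring kAddressTagShift and
kAddressTagMask without using the runtime:

#include <cstdint>
#include <cstdio>

constexpr unsigned kShift = 56;
constexpr uint64_t kMask = 0xffull << kShift;

int main() {
  uint64_t addr = 0x0000007fdeadbeefull;  // hypothetical untagged address
  uint8_t tag = 0x2a;
  uint64_t tagged = (addr & ~kMask) | ((uint64_t)tag << kShift);  // AddTagToPointer
  printf("tagged:   0x%016llx\n", (unsigned long long)tagged);
  printf("tag:      0x%02x\n", (unsigned)(tagged >> kShift));  // GetTagFromPointer
  printf("untagged: 0x%016llx\n",
         (unsigned long long)(tagged & ~kMask));  // UntagAddr
  return 0;
}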

@@ -0,0 +1,408 @@
//===-- hwasan_allocator.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer allocator.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_checks.h"
#include "hwasan_mapping.h"
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
namespace __hwasan {
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;
static const tag_t kFallbackAllocTag = 0xBB;
static const tag_t kFallbackFreeTag = 0xBC;
enum RightAlignMode {
kRightAlignNever,
kRightAlignSometimes,
kRightAlignAlways
};
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
bool HwasanChunkView::IsAllocated() const {
return metadata_ && metadata_->alloc_context_id &&
metadata_->get_requested_size();
}
// Aligns the 'addr' right to the granule boundary.
static uptr AlignRight(uptr addr, uptr requested_size) {
uptr tail_size = requested_size % kShadowAlignment;
if (!tail_size) return addr;
return addr + kShadowAlignment - tail_size;
}
uptr HwasanChunkView::Beg() const {
if (metadata_ && metadata_->right_aligned)
return AlignRight(block_, metadata_->get_requested_size());
return block_;
}
uptr HwasanChunkView::End() const {
return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
return metadata_->get_requested_size();
}
u32 HwasanChunkView::GetAllocStackId() const {
return metadata_->alloc_context_id;
}
uptr HwasanChunkView::ActualSize() const {
return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
}
bool HwasanChunkView::FromSmallHeap() const {
return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}
void GetAllocatorStats(AllocatorStatCounters s) {
allocator.GetStats(s);
}
void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
allocator.SwallowCache(cache);
}
static uptr TaggedSize(uptr size) {
if (!size) size = 1;
uptr new_size = RoundUpTo(size, kShadowAlignment);
CHECK_GE(new_size, size);
return new_size;
}
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
bool zeroise) {
if (orig_size > kMaxAllowedMallocSize) {
if (AllocatorMayReturnNull()) {
Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
orig_size);
return nullptr;
}
ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
}
alignment = Max(alignment, kShadowAlignment);
uptr size = TaggedSize(orig_size);
Thread *t = GetCurrentThread();
void *allocated;
if (t) {
allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, size, alignment);
}
if (UNLIKELY(!allocated)) {
SetAllocatorOutOfMemory();
if (AllocatorMayReturnNull())
return nullptr;
ReportOutOfMemory(size, stack);
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->set_requested_size(orig_size);
meta->alloc_context_id = StackDepotPut(*stack);
meta->right_aligned = false;
if (zeroise) {
internal_memset(allocated, 0, size);
} else if (flags()->max_malloc_fill_size > 0) {
uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
}
if (size != orig_size) {
internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
size - orig_size - 1);
}
void *user_ptr = allocated;
// Tagging can only be skipped when both tag_in_malloc and tag_in_free are
// false. When tag_in_malloc = false and tag_in_free = true malloc needs to
// retag to 0.
if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
uptr tag_size = orig_size ? orig_size : 1;
uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
user_ptr =
(void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
if (full_granule_size != tag_size) {
u8 *short_granule =
reinterpret_cast<u8 *>(allocated) + full_granule_size;
TagMemoryAligned((uptr)short_granule, kShadowAlignment,
tag_size % kShadowAlignment);
short_granule[kShadowAlignment - 1] = tag;
}
} else {
user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
}
}
HWASAN_MALLOC_HOOK(user_ptr, size);
return user_ptr;
}
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
CHECK(tagged_ptr);
uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
tag_t mem_tag = *reinterpret_cast<tag_t *>(
MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK(tagged_ptr);
HWASAN_FREE_HOOK(tagged_ptr);
if (!PointerAndMemoryTagsMatch(tagged_ptr))
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
void *untagged_ptr = UntagPtr(tagged_ptr);
void *aligned_ptr = reinterpret_cast<void *>(
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
uptr orig_size = meta->get_requested_size();
u32 free_context_id = StackDepotPut(*stack);
u32 alloc_context_id = meta->alloc_context_id;
// Check tail magic.
uptr tagged_size = TaggedSize(orig_size);
if (flags()->free_checks_tail_magic && orig_size &&
tagged_size != orig_size) {
uptr tail_size = tagged_size - orig_size - 1;
CHECK_LT(tail_size, kShadowAlignment);
void *tail_beg = reinterpret_cast<void *>(
reinterpret_cast<uptr>(aligned_ptr) + orig_size);
if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
orig_size, tail_magic);
}
meta->set_requested_size(0);
meta->alloc_context_id = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
Thread *t = GetCurrentThread();
if (flags()->max_free_fill_size > 0) {
uptr fill_size =
Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
t ? t->GenerateRandomTag() : kFallbackFreeTag);
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
free_context_id, static_cast<u32>(orig_size)});
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocator.Deallocate(cache, aligned_ptr);
}
}
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
uptr new_size, uptr alignment) {
if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
void *tagged_ptr_new =
HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
if (tagged_ptr_old && tagged_ptr_new) {
void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
internal_memcpy(
UntagPtr(tagged_ptr_new), untagged_ptr_old,
Min(new_size, static_cast<uptr>(meta->get_requested_size())));
HwasanDeallocate(stack, tagged_ptr_old);
}
return tagged_ptr_new;
}
static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
if (AllocatorMayReturnNull())
return nullptr;
ReportCallocOverflow(nmemb, size, stack);
}
return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
}
HwasanChunkView FindHeapChunkByAddress(uptr address) {
void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
if (!block)
return HwasanChunkView();
Metadata *metadata =
reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
static uptr AllocationSize(const void *tagged_ptr) {
const void *untagged_ptr = UntagPtr(tagged_ptr);
if (!untagged_ptr) return 0;
const void *beg = allocator.GetBlockBegin(untagged_ptr);
Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
if (b->right_aligned) {
if (beg != reinterpret_cast<void *>(RoundDownTo(
reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
return 0;
} else {
if (beg != untagged_ptr) return 0;
}
return b->get_requested_size();
}
void *hwasan_malloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
}
void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
}
void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
if (!ptr)
return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
if (size == 0) {
HwasanDeallocate(stack, ptr);
return nullptr;
}
return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
}
void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
errno = errno_ENOMEM;
if (AllocatorMayReturnNull())
return nullptr;
ReportReallocArrayOverflow(nmemb, size, stack);
}
return hwasan_realloc(ptr, nmemb * size, stack);
}
void *hwasan_valloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(
HwasanAllocate(stack, size, GetPageSizeCached(), false));
}
void *hwasan_pvalloc(uptr size, StackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
if (AllocatorMayReturnNull())
return nullptr;
ReportPvallocOverflow(size, stack);
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
}
void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
return nullptr;
ReportInvalidAlignedAllocAlignment(size, alignment, stack);
}
return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}
void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
return nullptr;
ReportInvalidAllocationAlignment(alignment, stack);
}
return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
}
int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
if (AllocatorMayReturnNull())
return errno_EINVAL;
ReportInvalidPosixMemalignAlignment(alignment, stack);
}
void *ptr = HwasanAllocate(stack, size, alignment, false);
if (UNLIKELY(!ptr))
// OOM error is already taken care of by HwasanAllocate.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*(void **)UntagPtr(memptr) = ptr;
return 0;
}
void hwasan_free(void *ptr, StackTrace *stack) {
return HwasanDeallocate(stack, ptr);
}
} // namespace __hwasan
using namespace __hwasan;
void __hwasan_enable_allocator_tagging() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
}
void __hwasan_disable_allocator_tagging() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
}
uptr __sanitizer_get_current_allocated_bytes() {
uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
return stats[AllocatorStatAllocated];
}
uptr __sanitizer_get_heap_size() {
uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
return stats[AllocatorStatMapped];
}
uptr __sanitizer_get_free_bytes() { return 1; }
uptr __sanitizer_get_unmapped_bytes() { return 1; }
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
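
The short-granule scheme used by HwasanAllocate above can be shown in
isolation: a request is rounded up to whole 16-byte granules (TaggedSize); the
shadow of each full granule stores the tag, while the shadow of a trailing
partial granule stores the number of used bytes, and the real tag is parked in
that granule's last byte (this is what PossiblyShortTagMatches checks). A
self-contained sketch of the arithmetic, assuming the 16-byte kShadowAlignment:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kGranule = 16;  // kShadowAlignment
  uint64_t orig_size = 40;       // requested allocation size
  uint64_t tagged_size = (orig_size + kGranule - 1) & ~(kGranule - 1);  // TaggedSize
  uint64_t full = orig_size & ~(kGranule - 1);  // RoundDownTo: fully used granules
  printf("tagged size:        %llu\n", (unsigned long long)tagged_size);  // 48
  printf("full granule bytes: %llu\n", (unsigned long long)full);         // 32
  printf("short shadow value: %llu\n",
         (unsigned long long)(orig_size % kGranule));                     // 8
  return 0;
}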

@@ -0,0 +1,107 @@
//===-- hwasan_allocator.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
#include "hwasan_poisoning.h"
#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
#endif
namespace __hwasan {
struct Metadata {
u32 requested_size_low;
u32 requested_size_high : 31;
u32 right_aligned : 1;
u32 alloc_context_id;
u64 get_requested_size() {
return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
}
void set_requested_size(u64 size) {
requested_size_low = size & ((1ul << 32) - 1);
requested_size_high = size >> 32;
}
};
struct HwasanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// It may later be returned to the user by mmap() or reused for another
// thread's stack, so make it accessible again with a zero-tagged pointer.
TagMemory(p, size, 0);
}
};
static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
struct AP64 {
static const uptr kSpaceBeg = ~0ULL;
static const uptr kSpaceSize = 0x2000000000ULL;
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
typedef HwasanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
class HwasanChunkView {
public:
HwasanChunkView() : block_(0), metadata_(nullptr) {}
HwasanChunkView(uptr block, Metadata *metadata)
: block_(block), metadata_(metadata) {}
bool IsAllocated() const; // Checks if the memory is currently allocated
uptr Beg() const; // First byte of user memory
uptr End() const; // Last byte of user memory
uptr UsedSize() const; // Size requested by the user
uptr ActualSize() const; // Size allocated by the allocator.
u32 GetAllocStackId() const;
bool FromSmallHeap() const;
private:
uptr block_;
Metadata *const metadata_;
};
HwasanChunkView FindHeapChunkByAddress(uptr address);
// Information about one (de)allocation that happened in the past.
// These are recorded in a thread-local ring buffer.
// TODO: this is currently 24 bytes (20 bytes + alignment).
// Compress it to 16 bytes or extend it to be more useful.
struct HeapAllocationRecord {
uptr tagged_addr;
u32 alloc_context_id;
u32 free_context_id;
u32 requested_size;
};
typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
void GetAllocatorStats(AllocatorStatCounters s);
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
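
A note on the Metadata layout above: the requested size is split so that a
63-bit value and the 1-bit right_aligned flag fit in eight bytes. A
self-contained round-trip of the same encoding:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t size = (5ull << 32) + 123;                  // a size above 4 GiB
  uint32_t lo = (uint32_t)(size & 0xffffffffull);      // requested_size_low
  uint32_t hi = (uint32_t)(size >> 32) & 0x7fffffffu;  // requested_size_high : 31
  uint64_t back = ((uint64_t)hi << 32) + lo;           // get_requested_size()
  printf("round trip ok: %d\n", (int)(back == size));  // 1
  return 0;
}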

@@ -0,0 +1,124 @@
//===-- hwasan_checks.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_CHECKS_H
#define HWASAN_CHECKS_H
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __hwasan {
template <unsigned X>
__attribute__((always_inline)) static void SigTrap(uptr p) {
#if defined(__aarch64__)
(void)p;
// 0x900 is added so that we do not interfere with the kernel's use of the
// lower brk immediate values.
register uptr x0 asm("x0") = p;
asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + X));
#elif defined(__x86_64__)
// INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
// total. The pointer is passed via rdi.
// 0x40 is added as a safeguard, to help distinguish our trap from others and
// to avoid a zero offset in the instruction (otherwise it would be encoded as
// a different, three-byte nop instruction).
asm volatile(
"int3\n"
"nopl %c0(%%rax)\n" ::"n"(0x40 + X),
"D"(p));
#else
// FIXME: not always sigill.
__builtin_trap();
#endif
// __builtin_unreachable();
}
// Version for an access size that is not a power of 2.
template <unsigned X>
__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
#if defined(__aarch64__)
register uptr x0 asm("x0") = p;
register uptr x1 asm("x1") = size;
asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + X));
#elif defined(__x86_64__)
// Size is stored in rsi.
asm volatile(
"int3\n"
"nopl %c0(%%rax)\n" ::"n"(0x40 + X),
"D"(p), "S"(size));
#else
__builtin_trap();
#endif
// __builtin_unreachable();
}
__attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
tag_t mem_tag, uptr ptr, uptr sz) {
tag_t ptr_tag = GetTagFromPointer(ptr);
if (ptr_tag == mem_tag)
return true;
if (mem_tag >= kShadowAlignment)
return false;
if ((ptr & (kShadowAlignment - 1)) + sz > mem_tag)
return false;
#ifndef __aarch64__
ptr = UntagAddr(ptr);
#endif
return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
}
enum class ErrorAction { Abort, Recover };
enum class AccessType { Load, Store };
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
SigTrap<0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + LogSize>(p);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
}
template <ErrorAction EA, AccessType AT>
__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
uptr sz) {
if (sz == 0)
return;
tag_t ptr_tag = GetTagFromPointer(p);
uptr ptr_raw = p & ~kAddressTagMask;
tag_t *shadow_first = (tag_t *)MemToShadow(ptr_raw);
tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
for (tag_t *t = shadow_first; t < shadow_last; ++t)
if (UNLIKELY(ptr_tag != *t)) {
SigTrap<0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
uptr end = p + sz;
uptr tail_sz = end & 0xf;
if (UNLIKELY(tail_sz != 0 &&
!PossiblyShortTagMatches(
*shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
SigTrap<0x20 * (EA == ErrorAction::Recover) +
0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
}
} // end namespace __hwasan
#endif // HWASAN_CHECKS_H
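
The trap immediates built by CheckAddress/CheckAddressSized above encode the
whole check in one constant: 0x20 for a recoverable check, 0x10 for a store,
and the low nibble is log2 of the access size (0xf marks a variable-sized
access). A sketch of decoding an AArch64 brk immediate under that scheme (the
0x900 base comes from SigTrap above; the value 0x913 is a made-up example):

#include <cstdio>

int main() {
  unsigned imm = 0x913;            // hypothetical brk immediate from a report
  unsigned x = imm - 0x900;        // strip the HWASan base
  bool recover = (x & 0x20) != 0;  // 0: aborting check
  bool store = (x & 0x10) != 0;    // 1: store access
  unsigned log_size = x & 0xf;     // 3: an 8-byte access
  printf("recover=%d store=%d bytes=%u\n", (int)recover, (int)store,
         log_size == 0xf ? 0u : 1u << log_size);
  return 0;
}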

@@ -0,0 +1,126 @@
//===-- hwasan_dynamic_shadow.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
/// region and handles ifunc resolver case, when necessary.
///
//===----------------------------------------------------------------------===//
#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_posix.h"
#include <elf.h>
#include <link.h>
// The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals.
#if SANITIZER_ANDROID
extern "C" {
INTERFACE_ATTRIBUTE void __hwasan_shadow();
decltype(__hwasan_shadow)* __hwasan_premap_shadow();
} // extern "C"
namespace __hwasan {
// Conservative upper limit.
static uptr PremapShadowSize() {
return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale,
GetMmapGranularity());
}
static uptr PremapShadow() {
return MapDynamicShadow(PremapShadowSize(), kShadowScale,
kShadowBaseAlignment, kHighMemEnd);
}
static bool IsPremapShadowAvailable() {
const uptr shadow = reinterpret_cast<uptr>(&__hwasan_shadow);
const uptr resolver = reinterpret_cast<uptr>(&__hwasan_premap_shadow);
// shadow == resolver is how Android KitKat and older handles ifunc.
// shadow == 0 just in case.
return shadow != 0 && shadow != resolver;
}
static uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
const uptr granularity = GetMmapGranularity();
const uptr shadow_start = reinterpret_cast<uptr>(&__hwasan_shadow);
const uptr premap_shadow_size = PremapShadowSize();
const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
}
} // namespace __hwasan
extern "C" {
decltype(__hwasan_shadow)* __hwasan_premap_shadow() {
// The resolver might be called multiple times. Map the shadow just once.
static __sanitizer::uptr shadow = 0;
if (!shadow)
shadow = __hwasan::PremapShadow();
return reinterpret_cast<decltype(__hwasan_shadow)*>(shadow);
}
// __hwasan_shadow is a "function" that has the same address as the first byte
// of the shadow mapping.
INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow")))
void __hwasan_shadow();
extern __attribute((weak, visibility("hidden"))) ElfW(Rela) __rela_iplt_start[],
__rela_iplt_end[];
} // extern "C"
namespace __hwasan {
void InitShadowGOT() {
// Call the ifunc resolver for __hwasan_shadow and fill in its GOT entry. This
// needs to be done before other ifunc resolvers (which are handled by libc)
// because a resolver might read __hwasan_shadow.
typedef ElfW(Addr) (*ifunc_resolver_t)(void);
for (ElfW(Rela) *r = __rela_iplt_start; r != __rela_iplt_end; ++r) {
ElfW(Addr)* offset = reinterpret_cast<ElfW(Addr)*>(r->r_offset);
ElfW(Addr) resolver = r->r_addend;
if (resolver == reinterpret_cast<ElfW(Addr)>(&__hwasan_premap_shadow)) {
*offset = reinterpret_cast<ifunc_resolver_t>(resolver)();
break;
}
}
}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
if (IsPremapShadowAvailable())
return FindPremappedShadowStart(shadow_size_bytes);
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
}
} // namespace __hwasan
#else
namespace __hwasan {
void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
}
} // namespace __hwasan
#endif // SANITIZER_ANDROID
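
A minimal sketch of the ifunc trick used for __hwasan_shadow above: the
resolver runs during relocation, and the "function" ends up with whatever
address the resolver returned, here a static buffer standing in for the
premapped shadow. Hypothetical names; assumes a GNU toolchain on an ELF
target:

#include <cstdio>

extern "C" {
void (*my_resolver())() {
  static char region[4096];  // stand-in for the premapped shadow region
  return reinterpret_cast<void (*)()>(region);
}
void my_region() __attribute__((ifunc("my_resolver")));
}

int main() {
  // &my_region is the address my_resolver() produced, not ordinary code.
  printf("region resolved to %p\n", reinterpret_cast<void *>(&my_region));
  return 0;
}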

@@ -0,0 +1,27 @@
//===-- hwasan_dynamic_shadow.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
/// region.
///
//===----------------------------------------------------------------------===//
#ifndef HWASAN_PREMAP_SHADOW_H
#define HWASAN_PREMAP_SHADOW_H
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __hwasan {
uptr FindDynamicShadowStart(uptr shadow_size_bytes);
void InitShadowGOT();
} // namespace __hwasan
#endif // HWASAN_PREMAP_SHADOW_H

@@ -0,0 +1,67 @@
//===-- hwasan_exceptions.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer runtime.
//===----------------------------------------------------------------------===//
#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_common.h"
#include <unwind.h>
using namespace __hwasan;
using namespace __sanitizer;
typedef _Unwind_Reason_Code PersonalityFn(int version, _Unwind_Action actions,
uint64_t exception_class,
_Unwind_Exception* unwind_exception,
_Unwind_Context* context);
// Pointers to the _Unwind_GetGR and _Unwind_GetCFA functions are passed in
// instead of being called directly. This is to handle cases where the unwinder
// is statically linked and the sanitizer runtime and the program are linked
// against different unwinders. The _Unwind_Context data structure is opaque so
// it may be incompatible between unwinders.
typedef _Unwind_Word GetGRFn(_Unwind_Context* context, int index);
typedef _Unwind_Word GetCFAFn(_Unwind_Context* context);
extern "C" SANITIZER_INTERFACE_ATTRIBUTE _Unwind_Reason_Code
__hwasan_personality_wrapper(int version, _Unwind_Action actions,
uint64_t exception_class,
_Unwind_Exception* unwind_exception,
_Unwind_Context* context,
PersonalityFn* real_personality, GetGRFn* get_gr,
GetCFAFn* get_cfa) {
_Unwind_Reason_Code rc;
if (real_personality)
rc = real_personality(version, actions, exception_class, unwind_exception,
context);
else
rc = _URC_CONTINUE_UNWIND;
// We only untag frames without a landing pad because landing pads are
// responsible for untagging the stack themselves if they resume.
//
// Here we assume that the frame record appears after any locals. This is not
// required by AAPCS but is a requirement for HWASAN instrumented functions.
if ((actions & _UA_CLEANUP_PHASE) && rc == _URC_CONTINUE_UNWIND) {
#if defined(__x86_64__)
uptr fp = get_gr(context, 6); // rbp
#elif defined(__aarch64__)
uptr fp = get_gr(context, 29); // x29
#else
#error Unsupported architecture
#endif
uptr sp = get_cfa(context);
TagMemory(sp, fp - sp, 0);
}
return rc;
}

@@ -0,0 +1,29 @@
//===-- hwasan_flags.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_FLAGS_H
#define HWASAN_FLAGS_H
namespace __hwasan {
struct Flags {
#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "hwasan_flags.inc"
#undef HWASAN_FLAG
void SetDefaults();
};
Flags *flags();
} // namespace __hwasan
#endif // HWASAN_FLAGS_H

@@ -0,0 +1,74 @@
//===-- hwasan_flags.inc ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Hwasan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_FLAG
# error "Define HWASAN_FLAG prior to including this file!"
#endif
// HWASAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
HWASAN_FLAG(bool, verbose_threads, false,
"inform on thread creation/destruction")
HWASAN_FLAG(bool, tag_in_malloc, true, "")
HWASAN_FLAG(bool, tag_in_free, true, "")
HWASAN_FLAG(bool, print_stats, false, "")
HWASAN_FLAG(bool, halt_on_error, true, "")
HWASAN_FLAG(bool, atexit, false, "")
// Test only flag to disable malloc/realloc/free memory tagging on startup.
// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
HWASAN_FLAG(bool, disable_allocator_tagging, false, "")
// If false, use simple increment of a thread local counter to generate new
// tags.
HWASAN_FLAG(bool, random_tags, true, "")
HWASAN_FLAG(
int, max_malloc_fill_size, 0,
"HWASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.")
HWASAN_FLAG(bool, free_checks_tail_magic, 1,
"If set, free() will check the magic values "
"to the right of the allocated object "
"if the allocation size is not a divident of the granule size")
HWASAN_FLAG(
int, max_free_fill_size, 0,
"HWASan allocator flag. max_free_fill_size is the maximal amount of "
"bytes that will be filled with free_fill_byte during free.")
HWASAN_FLAG(int, malloc_fill_byte, 0xbe,
"Value used to fill the newly allocated memory.")
HWASAN_FLAG(int, free_fill_byte, 0x55,
"Value used to fill deallocated memory.")
HWASAN_FLAG(int, heap_history_size, 1023,
"The number of heap (de)allocations remembered per thread. "
"Affects the quality of heap-related reports, but not the ability "
"to find bugs.")
HWASAN_FLAG(bool, export_memory_stats, true,
"Export up-to-date memory stats through /proc")
HWASAN_FLAG(int, stack_history_size, 1024,
"The number of stack frames remembered per thread. "
"Affects the quality of stack-related reports, but not the ability "
"to find bugs.")
// Malloc / free bisection. Only tag malloc and free calls when a hash of
// allocation size and stack trace is between malloc_bisect_left and
// malloc_bisect_right (both inclusive). [0, 0] range is special and disables
// bisection (i.e. everything is tagged). Once the range is narrowed down
// enough, use malloc_bisect_dump to see interesting allocations.
HWASAN_FLAG(uptr, malloc_bisect_left, 0,
"Left bound of malloc bisection, inclusive.")
HWASAN_FLAG(uptr, malloc_bisect_right, 0,
"Right bound of malloc bisection, inclusive.")
HWASAN_FLAG(bool, malloc_bisect_dump, false,
"Print all allocations within [malloc_bisect_left, "
"malloc_bisect_right] range ")

@@ -0,0 +1,91 @@
//===-- hwasan_globals.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer globals-specific runtime.
//===----------------------------------------------------------------------===//
#include "hwasan_globals.h"
namespace __hwasan {
enum { NT_LLVM_HWASAN_GLOBALS = 3 };
struct hwasan_global_note {
s32 begin_relptr;
s32 end_relptr;
};
// Check that the given library meets the code model requirements for tagged
// globals. These properties are not checked at link time so they need to be
// checked at runtime.
static void CheckCodeModel(ElfW(Addr) base, const ElfW(Phdr) * phdr,
ElfW(Half) phnum) {
ElfW(Addr) min_addr = -1ull, max_addr = 0;
for (unsigned i = 0; i != phnum; ++i) {
if (phdr[i].p_type != PT_LOAD)
continue;
ElfW(Addr) lo = base + phdr[i].p_vaddr, hi = lo + phdr[i].p_memsz;
if (min_addr > lo)
min_addr = lo;
if (max_addr < hi)
max_addr = hi;
}
if (max_addr - min_addr > 1ull << 32) {
Report("FATAL: HWAddressSanitizer: library size exceeds 2^32\n");
Die();
}
if (max_addr > 1ull << 48) {
Report("FATAL: HWAddressSanitizer: library loaded above address 2^48\n");
Die();
}
}
ArrayRef<const hwasan_global> HwasanGlobalsFor(ElfW(Addr) base,
const ElfW(Phdr) * phdr,
ElfW(Half) phnum) {
// Read the phdrs from this DSO.
for (unsigned i = 0; i != phnum; ++i) {
if (phdr[i].p_type != PT_NOTE)
continue;
const char *note = reinterpret_cast<const char *>(base + phdr[i].p_vaddr);
const char *nend = note + phdr[i].p_memsz;
// Traverse all the notes until we find a HWASan note.
while (note < nend) {
auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(note);
const char *name = note + sizeof(ElfW(Nhdr));
const char *desc = name + RoundUpTo(nhdr->n_namesz, 4);
// Discard non-HWASan-Globals notes.
if (nhdr->n_type != NT_LLVM_HWASAN_GLOBALS ||
internal_strcmp(name, "LLVM") != 0) {
note = desc + RoundUpTo(nhdr->n_descsz, 4);
continue;
}
// Only libraries with instrumented globals need to be checked against the
// code model since they use relocations that aren't checked at link time.
CheckCodeModel(base, phdr, phnum);
auto *global_note = reinterpret_cast<const hwasan_global_note *>(desc);
auto *globals_begin = reinterpret_cast<const hwasan_global *>(
note + global_note->begin_relptr);
auto *globals_end = reinterpret_cast<const hwasan_global *>(
note + global_note->end_relptr);
return {globals_begin, globals_end};
}
}
return {};
}
} // namespace __hwasan

@@ -0,0 +1,49 @@
//===-- hwasan_globals.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Private Hwasan header.
//===----------------------------------------------------------------------===//
#ifndef HWASAN_GLOBALS_H
#define HWASAN_GLOBALS_H
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __hwasan {
// This object should only ever be cast over the global descriptor in the ELF
// PT_NOTE (i.e. never constructed) in order for `addr()` to work correctly.
struct hwasan_global {
// The size of this global variable. Note that the size in the descriptor is
// max 1 << 24. Larger globals have multiple descriptors.
uptr size() const { return info & 0xffffff; }
// The fully-relocated address of this global.
uptr addr() const { return reinterpret_cast<uintptr_t>(this) + gv_relptr; }
// The static tag of this global.
u8 tag() const { return info >> 24; }
// The relative address between the start of the descriptor for the HWASan
// global (in the PT_NOTE), and the fully relocated address of the global.
s32 gv_relptr;
u32 info;
};
// Walk through the specific DSO (as specified by the base, phdr, and phnum),
// and return the range of the [beginning, end) of the HWASan globals descriptor
// array.
ArrayRef<const hwasan_global> HwasanGlobalsFor(ElfW(Addr) base,
const ElfW(Phdr) * phdr,
ElfW(Half) phnum);
} // namespace __hwasan
#endif // HWASAN_GLOBALS_H
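
The descriptor above packs everything into eight bytes: info keeps the size in
its low 24 bits and the static tag in the top 8, while gv_relptr is a signed
offset from the descriptor itself to the global it describes. A sketch of
decoding the info word:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t info = (0x2au << 24) | 100u;     // tag 0x2a, size 100 bytes
  printf("size = %u\n", info & 0xffffffu);  // 100
  printf("tag  = 0x%x\n", info >> 24);      // 0x2a
  return 0;
}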

@@ -0,0 +1,349 @@
//===-- hwasan_interceptors.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Interceptors for standard library functions.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
#include "hwasan_report.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include <stdarg.h>
// ACHTUNG! No other system header includes in this file.
// Ideally, we should get rid of stdarg.h as well.
using namespace __hwasan;
using __sanitizer::memory_order;
using __sanitizer::atomic_load;
using __sanitizer::atomic_store;
using __sanitizer::atomic_uintptr_t;
static uptr allocated_for_dlsym;
static const uptr kDlsymAllocPoolSize = 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
static bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < sizeof(alloc_memory_for_dlsym);
}
static void *AllocateFromLocalPool(uptr size_in_bytes) {
uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
allocated_for_dlsym += size_in_words;
CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
return mem;
}
#define ENSURE_HWASAN_INITED() do { \
CHECK(!hwasan_init_is_running); \
if (!hwasan_inited) { \
__hwasan_init(); \
} \
} while (0)
int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
CHECK_NE(memptr, 0);
int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
return res;
}
void * __sanitizer_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_memalign(alignment, size, &stack);
}
void * __sanitizer_aligned_alloc(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_aligned_alloc(alignment, size, &stack);
}
void * __sanitizer___libc_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
void *ptr = hwasan_memalign(alignment, size, &stack);
if (ptr)
DTLS_on_libc_memalign(ptr, size);
return ptr;
}
void * __sanitizer_valloc(uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_valloc(size, &stack);
}
void * __sanitizer_pvalloc(uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_pvalloc(size, &stack);
}
void __sanitizer_free(void *ptr) {
GET_MALLOC_STACK_TRACE;
if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
hwasan_free(ptr, &stack);
}
void __sanitizer_cfree(void *ptr) {
GET_MALLOC_STACK_TRACE;
if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
hwasan_free(ptr, &stack);
}
uptr __sanitizer_malloc_usable_size(const void *ptr) {
return __sanitizer_get_allocated_size(ptr);
}
struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
__sanitizer_struct_mallinfo sret;
internal_memset(&sret, 0, sizeof(sret));
return sret;
}
int __sanitizer_mallopt(int cmd, int value) {
return 0;
}
void __sanitizer_malloc_stats(void) {
// FIXME: implement, but don't call REAL(malloc_stats)!
}
void * __sanitizer_calloc(uptr nmemb, uptr size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!hwasan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
return AllocateFromLocalPool(nmemb * size);
return hwasan_calloc(nmemb, size, &stack);
}
void * __sanitizer_realloc(void *ptr, uptr size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr;
if (UNLIKELY(!hwasan_inited)) {
new_ptr = AllocateFromLocalPool(copy_size);
} else {
copy_size = size;
new_ptr = hwasan_malloc(copy_size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
return hwasan_realloc(ptr, size, &stack);
}
void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_reallocarray(ptr, nmemb, size, &stack);
}
void * __sanitizer_malloc(uptr size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!hwasan_init_is_running))
ENSURE_HWASAN_INITED();
if (UNLIKELY(!hwasan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
return hwasan_malloc(size, &stack);
}
#if HWASAN_WITH_INTERCEPTORS
#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
ALIAS("__sanitizer_" #FN); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
ARGS) ALIAS("__sanitizer_" #FN)
INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
SIZE_T size);
INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
INTERCEPTOR_ALIAS(void, free, void *ptr);
INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
INTERCEPTOR_ALIAS(void, cfree, void *ptr);
INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
INTERCEPTOR_ALIAS(void, malloc_stats, void);
#endif
struct ThreadStartArg {
thread_callback_t callback;
void *param;
};
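// Trampoline handed to the real pthread_create: enter the hwasan thread
// state, copy the start routine and its argument out of the dedicated page,
// release that page, and only then run user code.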
static void *HwasanThreadStartFunc(void *arg) {
__hwasan_thread_enter();
ThreadStartArg A = *reinterpret_cast<ThreadStartArg*>(arg);
UnmapOrDie(arg, GetPageSizeCached());
return A.callback(A.param);
}
INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
void * param) {
ScopedTaggingDisabler disabler;
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
GetPageSizeCached(), "pthread_create"));
*A = {callback, param};
int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
&HwasanThreadStartFunc, A);
return res;
}
DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
#endif // HWASAN_WITH_INTERCEPTORS
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
__hw_sigset_t *__restrict __oset);
#define SIG_BLOCK 0
#define SIG_SETMASK 2
extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
env[0].__mask_was_saved =
(savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
&env[0].__saved_mask) == 0);
return 0;
}
static void __attribute__((always_inline))
InternalLongjmp(__hw_register_buf env, int retval) {
// Clear all memory tags on the stack between here and where we're going.
unsigned long long stack_pointer = env[13];
// The stack pointer should never be tagged, so we don't need to clear the
// tag for this function call.
__hwasan_handle_longjmp((void *)stack_pointer);
// Run code for handling a longjmp.
// Need to use a register that isn't going to be loaded from the environment
// buffer -- hence why we need to specify the register to use.
// Must implement this ourselves, since we don't know the order of registers
// in different libc implementations and many implementations mangle the
// stack pointer so we can't use it without knowing the demangling scheme.
register long int retval_tmp asm("x1") = retval;
register void *env_address asm("x0") = &env[0];
asm volatile("ldp x19, x20, [%0, #0<<3];"
"ldp x21, x22, [%0, #2<<3];"
"ldp x23, x24, [%0, #4<<3];"
"ldp x25, x26, [%0, #6<<3];"
"ldp x27, x28, [%0, #8<<3];"
"ldp x29, x30, [%0, #10<<3];"
"ldp d8, d9, [%0, #14<<3];"
"ldp d10, d11, [%0, #16<<3];"
"ldp d12, d13, [%0, #18<<3];"
"ldp d14, d15, [%0, #20<<3];"
"ldr x5, [%0, #13<<3];"
"mov sp, x5;"
// Return the value requested to return through arguments.
// This should be in x1 given what we requested above.
"cmp %1, #0;"
"mov x0, #1;"
"csel x0, %1, x0, ne;"
"br x30;"
: "+r"(env_address)
: "r"(retval_tmp));
}
INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
if (env[0].__mask_was_saved)
// Restore the saved signal mask.
(void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
(__hw_sigset_t *)0);
InternalLongjmp(env[0].__jmpbuf, val);
}
// Required since glibc's libpthread calls __libc_longjmp on pthread_exit and
// _setjmp on start_thread. Hence we have to intercept the longjmp used by
// pthread_exit so the __hw_jmp_buf layout matches.
INTERCEPTOR(void, __libc_longjmp, __hw_jmp_buf env, int val) {
InternalLongjmp(env[0].__jmpbuf, val);
}
INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
InternalLongjmp(env[0].__jmpbuf, val);
}
#undef SIG_BLOCK
#undef SIG_SETMASK
#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
static void BeforeFork() {
StackDepotLockAll();
}
static void AfterFork() {
StackDepotUnlockAll();
}
INTERCEPTOR(int, fork, void) {
ENSURE_HWASAN_INITED();
BeforeFork();
int pid = REAL(fork)();
AfterFork();
return pid;
}
namespace __hwasan {
int OnExit() {
// FIXME: ask frontend whether we need to return failure.
return 0;
}
} // namespace __hwasan
namespace __hwasan {
void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
INTERCEPT_FUNCTION(fork);
#if HWASAN_WITH_INTERCEPTORS
#if defined(__linux__)
INTERCEPT_FUNCTION(vfork);
#endif // __linux__
INTERCEPT_FUNCTION(pthread_create);
#endif
inited = 1;
}
} // namespace __hwasan

View File

@ -0,0 +1,11 @@
#include "sanitizer_common/sanitizer_asm.h"
#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
#define COMMON_INTERCEPTOR_HANDLE_VFORK __hwasan_handle_vfork
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
NO_EXEC_STACK_DIRECTIVE

View File

@ -0,0 +1,227 @@
//===-- hwasan_interface_internal.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Private Hwasan interface header.
//===----------------------------------------------------------------------===//
#ifndef HWASAN_INTERFACE_INTERNAL_H
#define HWASAN_INTERFACE_INTERNAL_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include <link.h>
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_init_static();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_init();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_library_loaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
ElfW(Half) phnum);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_library_unloaded(ElfW(Addr) base, const ElfW(Phdr) * phdr,
ElfW(Half) phnum);
using __sanitizer::uptr;
using __sanitizer::sptr;
using __sanitizer::uu64;
using __sanitizer::uu32;
using __sanitizer::uu16;
using __sanitizer::u64;
using __sanitizer::u32;
using __sanitizer::u16;
using __sanitizer::u8;
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_init_frames(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __hwasan_shadow_memory_dynamic_address;
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_loadN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load1(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load2(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load4(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load8(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_loadN_noabort(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load1_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load2_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load4_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load8_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_storeN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store2(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store4(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store8(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_storeN_noabort(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store2_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store4_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store8_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __hwasan_tag_pointer(uptr p, u8 tag);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_mismatch(uptr addr, u8 ts);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
size_t outsize);
SANITIZER_INTERFACE_ATTRIBUTE
u8 __hwasan_generate_tag();
// Returns the offset of the first tag mismatch or -1 if the whole range is
// good.
SANITIZER_INTERFACE_ATTRIBUTE
sptr __hwasan_test_shadow(const void *x, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __hwasan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_print_shadow(const void *x, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_handle_longjmp(const void *sp_dst);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_handle_vfork(const void *sp_dst);
SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p);
SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p);
SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_enable_allocator_tagging();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_disable_allocator_tagging();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_thread_enter();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_thread_exit();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_print_memory_usage();
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_memalign(uptr alignment, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_aligned_alloc(uptr alignment, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer___libc_memalign(uptr alignment, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_valloc(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_pvalloc(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_free(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cfree(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_malloc_usable_size(const void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE
__hwasan::__sanitizer_struct_mallinfo __sanitizer_mallinfo();
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_mallopt(int cmd, int value);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_malloc_stats(void);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_calloc(uptr nmemb, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_realloc(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void * __sanitizer_malloc(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n);
} // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H

View File

@ -0,0 +1,455 @@
//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
/// FreeBSD-specific code.
///
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include <dlfcn.h>
#include <elf.h>
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <unistd.h>
#include <unwind.h>
#include <sys/prctl.h>
#include <errno.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_procmaps.h"
// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
// Not currently tested.
// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
// Integration tests downstream exist.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
// Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
// Tested with check-hwasan on aarch64-linux-android.
#if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
#endif
namespace __hwasan {
// With the zero shadow base we cannot actually map pages starting from 0.
// This constant is somewhat arbitrary.
constexpr uptr kZeroBaseShadowStart = 0;
constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;
static void ProtectGap(uptr addr, uptr size) {
__sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
kZeroBaseMaxShadowStart);
}
uptr kLowMemStart;
uptr kLowMemEnd;
uptr kLowShadowEnd;
uptr kLowShadowStart;
uptr kHighShadowStart;
uptr kHighShadowEnd;
uptr kHighMemStart;
uptr kHighMemEnd;
static void PrintRange(uptr start, uptr end, const char *name) {
Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
}
static void PrintAddressSpaceLayout() {
PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
if (kHighShadowEnd + 1 < kHighMemStart)
PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
else
CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
if (kLowShadowEnd + 1 < kHighShadowStart)
PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
else
CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
if (kLowMemEnd + 1 < kLowShadowStart)
PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
else
CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
CHECK_EQ(0, kLowMemStart);
}
static uptr GetHighMemEnd() {
// HighMem covers the upper part of the address space.
uptr max_address = GetMaxUserVirtualAddress();
// Adjust max address to make sure that kHighMemEnd and kHighMemStart are
// properly aligned:
max_address |= (GetMmapGranularity() << kShadowScale) - 1;
return max_address;
}
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
__hwasan_shadow_memory_dynamic_address =
FindDynamicShadowStart(shadow_size_bytes);
}
void InitPrctl() {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
// Check we're running on a kernel that can use the tagged address ABI.
if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
errno == EINVAL) {
#if SANITIZER_ANDROID
// Some older Android kernels have the tagged pointer ABI enabled
// unconditionally, and hence don't have the tagged-addr prctl while still
// allowing the ABI.
// If targeting Android and the prctl is not around, we assume this is the
// case.
return;
#else
Printf(
"FATAL: "
"HWAddressSanitizer requires a kernel with tagged address ABI.\n");
Die();
#endif
}
// Turn on the tagged address ABI.
if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
(uptr)-1 ||
!internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
Printf(
"FATAL: HWAddressSanitizer failed to enable tagged address syscall "
"ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
"configuration.\n");
Die();
}
#undef PR_SET_TAGGED_ADDR_CTRL
#undef PR_GET_TAGGED_ADDR_CTRL
#undef PR_TAGGED_ADDR_ENABLE
}
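// Illustrative sketch, not part of the imported sources: the same kernel
// probe as above, usable after the PR_* macros have been #undef'd. The
// constant 56 is PR_GET_TAGGED_ADDR_CTRL, and bit 0 is PR_TAGGED_ADDR_ENABLE.
static inline bool TaggedAddrABIEnabled() {
  uptr res = internal_prctl(56 /* PR_GET_TAGGED_ADDR_CTRL */, 0, 0, 0, 0);
  return res != (uptr)-1 && (res & 1 /* PR_TAGGED_ADDR_ENABLE */) != 0;
}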
bool InitShadow() {
// Define the entire memory range.
kHighMemEnd = GetHighMemEnd();
// Determine shadow memory base offset.
InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));
// Place the low memory first.
kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
kLowMemStart = 0;
// Define the low shadow based on the already placed low memory.
kLowShadowEnd = MemToShadow(kLowMemEnd);
kLowShadowStart = __hwasan_shadow_memory_dynamic_address;
// High shadow takes whatever memory is left up there (making sure it is not
// interfering with low memory in the fixed case).
kHighShadowEnd = MemToShadow(kHighMemEnd);
kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;
// High memory starts where allocated shadow allows.
kHighMemStart = ShadowToMem(kHighShadowStart);
// Check the sanity of the defined memory ranges (there might be gaps).
CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
CHECK_GT(kHighMemStart, kHighShadowEnd);
CHECK_GT(kHighShadowEnd, kHighShadowStart);
CHECK_GT(kHighShadowStart, kLowMemEnd);
CHECK_GT(kLowMemEnd, kLowMemStart);
CHECK_GT(kLowShadowEnd, kLowShadowStart);
CHECK_GT(kLowShadowStart, kLowMemEnd);
if (Verbosity())
PrintAddressSpaceLayout();
// Reserve shadow memory.
ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");
// Protect all the gaps.
ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
if (kLowMemEnd + 1 < kLowShadowStart)
ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
if (kLowShadowEnd + 1 < kHighShadowStart)
ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
if (kHighShadowEnd + 1 < kHighMemStart)
ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
return true;
}
void InitThreads() {
CHECK(__hwasan_shadow_memory_dynamic_address);
uptr guard_page_size = GetMmapGranularity();
uptr thread_space_start =
__hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
uptr thread_space_end =
__hwasan_shadow_memory_dynamic_address - guard_page_size;
ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
"hwasan threads", /*madvise_shadow*/ false);
ProtectGap(thread_space_end,
__hwasan_shadow_memory_dynamic_address - thread_space_end);
InitThreadList(thread_space_start, thread_space_end - thread_space_start);
}
bool MemIsApp(uptr p) {
CHECK(GetTagFromPointer(p) == 0);
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}
static void HwasanAtExit(void) {
if (common_flags()->print_module_map)
DumpProcessMap();
if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
ReportStats();
if (hwasan_report_count > 0) {
// ReportAtExitStatistics();
if (common_flags()->exitcode)
internal__exit(common_flags()->exitcode);
}
}
void InstallAtExitHandler() {
atexit(HwasanAtExit);
}
// ---------------------- TSD ---------------- {{{1
extern "C" void __hwasan_thread_enter() {
hwasanThreadList().CreateCurrentThread()->InitRandomState();
}
extern "C" void __hwasan_thread_exit() {
Thread *t = GetCurrentThread();
// Make sure the signal handler cannot see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
if (t)
hwasanThreadList().ReleaseThread(t);
}
#if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
void HwasanTSDThreadInit() {
if (tsd_key_inited)
CHECK_EQ(0, pthread_setspecific(tsd_key,
(void *)GetPthreadDestructorIterations()));
}
void HwasanTSDDtor(void *tsd) {
uptr iterations = (uptr)tsd;
if (iterations > 1) {
CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
return;
}
__hwasan_thread_exit();
}
void HwasanTSDInit() {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
#else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
#endif
#if SANITIZER_ANDROID
uptr *GetCurrentThreadLongPtr() {
return (uptr *)get_android_tls_ptr();
}
#else
uptr *GetCurrentThreadLongPtr() {
return &__hwasan_tls;
}
#endif
#if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
uptr kMagicValue = 0x010203040A0B0C0D;
uptr *tls_ptr = GetCurrentThreadLongPtr();
uptr old_value = *tls_ptr;
*tls_ptr = kMagicValue;
dlerror();
if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
Printf(
"ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
"for dlerror().\n");
Die();
}
*tls_ptr = old_value;
}
#else
void AndroidTestTlsSlot() {}
#endif
Thread *GetCurrentThread() {
uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
if (UNLIKELY(*ThreadLongPtr == 0))
return nullptr;
auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
}
struct AccessInfo {
uptr addr;
uptr size;
bool is_store;
bool is_load;
bool recover;
};
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
// The access type is passed in a platform-dependent way (see below) and
// encoded as 0xXY, where X&1 is 1 for a store and 0 for a load, and X&2 is 1
// if the error is recoverable. Valid values of Y are 0 to 4, interpreted as
// log2(access_size), and 0xF, which means the access size is passed in a
// platform-dependent register (see below).
#if defined(__aarch64__)
// Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
// access size is stored in X1 register. Access address is always in X0
// register.
uptr pc = (uptr)info->si_addr;
const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
if ((code & 0xff00) != 0x900)
return AccessInfo{}; // Not ours.
const bool is_store = code & 0x10;
const bool recover = code & 0x20;
const uptr addr = uc->uc_mcontext.regs[0];
const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
return AccessInfo{}; // Not ours.
const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
#elif defined(__x86_64__)
// Access type is encoded in the instruction following INT3 as
// NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
// RSI register. Access address is always in RDI register.
uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
uint8_t *nop = (uint8_t*)pc;
if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
*(nop + 3) < 0x40)
return AccessInfo{}; // Not ours.
const unsigned code = *(nop + 3);
const bool is_store = code & 0x10;
const bool recover = code & 0x20;
const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
return AccessInfo{}; // Not ours.
const uptr size =
size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
#else
# error Unsupported architecture
#endif
return AccessInfo{addr, size, is_store, !is_store, recover};
}
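// Illustrative sketch, not part of the imported sources: the 0xXY byte
// described above, decoded outside of any signal context; the names are
// hypothetical.
struct ExampleDecodedAccess {
  bool is_store;      // X & 1
  bool recover;       // X & 2
  unsigned size_log;  // Y; 0xf means the size is passed in a register
};
static inline ExampleDecodedAccess DecodeAccessInfoByte(unsigned code) {
  return {(code & 0x10) != 0, (code & 0x20) != 0, code & 0xfu};
}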
static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
ucontext_t *uc, uptr *registers_frame = nullptr) {
InternalMmapVector<BufferedStackTrace> stack_buffer(1);
BufferedStackTrace *stack = stack_buffer.data();
stack->Reset();
stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
// The second stack frame contains the failing __hwasan_check function, as
// the first frame holds the registers saved in __hwasan_tag_mismatch, which
// we wish to ignore. This (currently) only occurs on AArch64, as x86_64
// implementations use SIGTRAP to report the failure and thus do not go
// through the register-saving stub.
if (registers_frame && stack->trace && stack->size > 0) {
stack->trace++;
stack->size--;
}
bool fatal = flags()->halt_on_error || !ai.recover;
ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
registers_frame);
}
static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
AccessInfo ai = GetAccessInfo(info, uc);
if (!ai.is_store && !ai.is_load)
return false;
SignalContext sig{info, uc};
HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);
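// On AArch64 the faulting BRK instruction does not advance the PC, so step
// over it manually; on x86_64 the INT3 has already been consumed by the time
// the signal is delivered.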
#if defined(__aarch64__)
uc->uc_mcontext.pc += 4;
#elif defined(__x86_64__)
#else
# error Unsupported architecture
#endif
return true;
}
static void OnStackUnwind(const SignalContext &sig, const void *,
BufferedStackTrace *stack) {
stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
common_flags()->fast_unwind_on_fatal);
}
void HwasanOnDeadlySignal(int signo, void *info, void *context) {
// Probably a tag mismatch.
if (signo == SIGTRAP)
if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
return;
HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}
} // namespace __hwasan
// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
// rest of the mismatch handling code (C++).
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
size_t outsize) {
__hwasan::AccessInfo ai;
ai.is_store = access_info & 0x10;
ai.is_load = !ai.is_store;
ai.recover = access_info & 0x20;
ai.addr = addr;
if ((access_info & 0xf) == 0xf)
ai.size = outsize;
else
ai.size = 1 << (access_info & 0xf);
__hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
(uptr)__builtin_frame_address(0), nullptr,
registers_frame);
__builtin_unreachable();
}
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD

View File

@ -0,0 +1,50 @@
//===-- hwasan_malloc_bisect.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_hash.h"
#include "hwasan.h"
namespace __hwasan {
static u32 malloc_hash(StackTrace *stack, uptr orig_size) {
uptr len = Min(stack->size, (unsigned)7);
MurMur2HashBuilder H(len);
H.add(orig_size);
// Start with frame #1 to skip __sanitizer_malloc frame, which is
// (a) almost always the same (well, could be operator new or new[])
// (b) can change hashes when compiler-rt is rebuilt, invalidating previous
// bisection results.
// Because of ASLR, use only offset inside the page.
for (uptr i = 1; i < len; ++i) H.add(((u32)stack->trace[i]) & 0xFFF);
return H.get();
}
static inline bool malloc_bisect(StackTrace *stack, uptr orig_size) {
uptr left = flags()->malloc_bisect_left;
uptr right = flags()->malloc_bisect_right;
if (LIKELY(left == 0 && right == 0))
return true;
if (!stack)
return true;
// Allow malloc_bisect_right > (u32)(-1) to avoid spelling the latter in
// decimal.
uptr h = (uptr)malloc_hash(stack, orig_size);
if (h < left || h > right)
return false;
if (flags()->malloc_bisect_dump) {
Printf("[alloc] %u %zu\n", h, orig_size);
stack->Print();
}
return true;
}
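// Illustrative usage, not part of the imported sources: running with
// HWASAN_OPTIONS=malloc_bisect_dump=1,malloc_bisect_left=L,malloc_bisect_right=R
// prints the hash of every allocation that gets tagged; halving the
// [left, right] interval across runs narrows a failure down to a single
// allocation site.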
} // namespace __hwasan

View File

@ -0,0 +1,66 @@
//===-- hwasan_mapping.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and defines memory mapping.
///
//===----------------------------------------------------------------------===//
#ifndef HWASAN_MAPPING_H
#define HWASAN_MAPPING_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "hwasan_interface_internal.h"
// Typical mapping on Linux/x86_64:
// with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]:
// || [0x7f0d59f40000, 0x7fffffffffff] || HighMem ||
// || [0x7efe2f934000, 0x7f0d59f3ffff] || HighShadow ||
// || [0x7e7e2f934000, 0x7efe2f933fff] || ShadowGap ||
// || [0x770d59f40000, 0x7e7e2f933fff] || LowShadow ||
// || [0x000000000000, 0x770d59f3ffff] || LowMem ||
// Typical mapping on Android/AArch64
// with dynamic shadow mapped: [0x007477480000, 0x007c77480000]:
// || [0x007c77480000, 0x007fffffffff] || HighMem ||
// || [0x007c3ebc8000, 0x007c7747ffff] || HighShadow ||
// || [0x007bbebc8000, 0x007c3ebc7fff] || ShadowGap ||
// || [0x007477480000, 0x007bbebc7fff] || LowShadow ||
// || [0x000000000000, 0x00747747ffff] || LowMem ||
// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th).
constexpr uptr kShadowScale = 4;
constexpr uptr kShadowAlignment = 1ULL << kShadowScale;
namespace __hwasan {
extern uptr kLowMemStart;
extern uptr kLowMemEnd;
extern uptr kLowShadowEnd;
extern uptr kLowShadowStart;
extern uptr kHighShadowStart;
extern uptr kHighShadowEnd;
extern uptr kHighMemStart;
extern uptr kHighMemEnd;
inline uptr MemToShadow(uptr untagged_addr) {
return (untagged_addr >> kShadowScale) +
__hwasan_shadow_memory_dynamic_address;
}
inline uptr ShadowToMem(uptr shadow_addr) {
return (shadow_addr - __hwasan_shadow_memory_dynamic_address) << kShadowScale;
}
inline uptr MemToShadowSize(uptr size) {
return size >> kShadowScale;
}
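// Worked example: with kShadowScale == 4, each shadow byte covers one 16-byte
// granule, so an access to untagged address A is checked against the shadow
// byte at (A >> 4) + __hwasan_shadow_memory_dynamic_address; addresses 0x1000
// through 0x100f all share a single shadow byte.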
bool MemIsApp(uptr p);
} // namespace __hwasan
#endif // HWASAN_MAPPING_H

View File

@ -0,0 +1,44 @@
//===-- hwasan_memintrinsics.cpp --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer and contains HWASAN versions of
/// memset, memcpy and memmove
///
//===----------------------------------------------------------------------===//
#include <string.h>
#include "hwasan.h"
#include "hwasan_checks.h"
#include "hwasan_flags.h"
#include "hwasan_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
using namespace __hwasan;
void *__hwasan_memset(void *block, int c, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(block), size);
return memset(UntagPtr(block), c, size);
}
void *__hwasan_memcpy(void *to, const void *from, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
return memcpy(UntagPtr(to), UntagPtr(from), size);
}
void *__hwasan_memmove(void *to, const void *from, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
return memmove(UntagPtr(to), UntagPtr(from), size);
}

View File

@ -0,0 +1,81 @@
//===-- hwasan_new_delete.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Interceptors for operators new and delete.
//===----------------------------------------------------------------------===//
#include "hwasan.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include <stddef.h>
#include <stdlib.h>
#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = hwasan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res
#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
if (ptr) hwasan_free(ptr, &stack)
#elif defined(__ANDROID__)
// We don't actually want to intercept operator new and delete on Android, but
// since we previously released a runtime that intercepted these functions,
// removing the interceptors would break ABI. Therefore we simply forward to
// malloc and free.
#define OPERATOR_NEW_BODY(nothrow) return malloc(size)
#define OPERATOR_DELETE_BODY free(ptr)
#endif
#ifdef OPERATOR_NEW_BODY
using namespace __hwasan;
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
} // namespace std
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY;
}
#endif // OPERATOR_NEW_BODY

View File

@ -0,0 +1,52 @@
//===-- hwasan_poisoning.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#include "hwasan_poisoning.h"
#include "hwasan_mapping.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_linux.h"
namespace __hwasan {
uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
CHECK(IsAligned(p, kShadowAlignment));
CHECK(IsAligned(size, kShadowAlignment));
uptr shadow_start = MemToShadow(p);
uptr shadow_size = MemToShadowSize(size);
uptr page_size = GetPageSizeCached();
uptr page_start = RoundUpTo(shadow_start, page_size);
uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
uptr threshold = common_flags()->clear_shadow_mmap_threshold;
if (SANITIZER_LINUX &&
UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
internal_memset((void *)shadow_start, tag, page_start - shadow_start);
internal_memset((void *)page_end, tag,
shadow_start + shadow_size - page_end);
// On Linux, MADV_DONTNEED on an anonymous private mapping makes subsequent
// reads return zero pages.
ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
} else {
internal_memset((void *)shadow_start, tag, shadow_size);
}
return AddTagToPointer(p, tag);
}
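// Example: assuming p is 16-byte aligned, TagMemoryAligned(p, 64, 0xA) sets
// the four covering shadow bytes to 0xA and returns p with 0xA written into
// its tag bits.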
uptr TagMemory(uptr p, uptr size, tag_t tag) {
uptr start = RoundDownTo(p, kShadowAlignment);
uptr end = RoundUpTo(p + size, kShadowAlignment);
return TagMemoryAligned(start, end - start, tag);
}
} // namespace __hwasan

View File

@ -0,0 +1,24 @@
//===-- hwasan_poisoning.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_POISONING_H
#define HWASAN_POISONING_H
#include "hwasan.h"
namespace __hwasan {
uptr TagMemory(uptr p, uptr size, tag_t tag);
uptr TagMemoryAligned(uptr p, uptr size, tag_t tag);
} // namespace __hwasan
#endif // HWASAN_POISONING_H

View File

@ -0,0 +1,651 @@
//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//
#include "hwasan_report.h"
#include <dlfcn.h>
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_globals.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;
namespace __hwasan {
class ScopedReport {
public:
ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
BlockingMutexLock lock(&error_message_lock_);
error_message_ptr_ = fatal ? &error_message_ : nullptr;
++hwasan_report_count;
}
~ScopedReport() {
{
BlockingMutexLock lock(&error_message_lock_);
if (fatal)
SetAbortMessage(error_message_.data());
error_message_ptr_ = nullptr;
}
if (common_flags()->print_module_map >= 2 ||
(fatal && common_flags()->print_module_map))
DumpProcessMap();
if (fatal)
Die();
}
static void MaybeAppendToErrorMessage(const char *msg) {
BlockingMutexLock lock(&error_message_lock_);
if (!error_message_ptr_)
return;
uptr len = internal_strlen(msg);
uptr old_size = error_message_ptr_->size();
error_message_ptr_->resize(old_size + len);
// Overwrite the old trailing '\0'; keep the new trailing '\0' untouched.
internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
}
private:
ScopedErrorReportLock error_report_lock_;
InternalMmapVector<char> error_message_;
bool fatal;
static InternalMmapVector<char> *error_message_ptr_;
static BlockingMutex error_message_lock_;
};
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
ScopedReport::MaybeAppendToErrorMessage(buffer);
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
public:
SavedStackAllocations(StackAllocationsRingBuffer *rb) {
uptr size = rb->size() * sizeof(uptr);
void *storage =
MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
new (&rb_) StackAllocationsRingBuffer(*rb, storage);
}
~SavedStackAllocations() {
StackAllocationsRingBuffer *rb = get();
UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
}
StackAllocationsRingBuffer *get() {
return (StackAllocationsRingBuffer *)&rb_;
}
private:
uptr rb_;
};
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
const char *Access() { return Blue(); }
const char *Allocation() const { return Magenta(); }
const char *Origin() const { return Magenta(); }
const char *Name() const { return Green(); }
const char *Location() { return Green(); }
const char *Thread() { return Green(); }
};
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
HeapAllocationRecord *har, uptr *ring_index,
uptr *num_matching_addrs,
uptr *num_matching_addrs_4b) {
if (!rb) return false;
*num_matching_addrs = 0;
*num_matching_addrs_4b = 0;
for (uptr i = 0, size = rb->size(); i < size; i++) {
auto h = (*rb)[i];
if (h.tagged_addr <= tagged_addr &&
h.tagged_addr + h.requested_size > tagged_addr) {
*har = h;
*ring_index = i;
return true;
}
// Measure the number of heap ring buffer entries that would have matched
// if we had only one entry per address (e.g. if the ring buffer data was
// stored at the address itself). This will help us tune the allocator
// implementation for MTE.
if (UntagAddr(h.tagged_addr) <= UntagAddr(tagged_addr) &&
UntagAddr(h.tagged_addr) + h.requested_size > UntagAddr(tagged_addr)) {
++*num_matching_addrs;
}
// Measure the number of heap ring buffer entries that would have matched
// if we only had 4 tag bits, which is the case for MTE.
auto untag_4b = [](uptr p) {
return p & ((1ULL << 60) - 1);
};
if (untag_4b(h.tagged_addr) <= untag_4b(tagged_addr) &&
untag_4b(h.tagged_addr) + h.requested_size > untag_4b(tagged_addr)) {
++*num_matching_addrs_4b;
}
}
return false;
}
static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
tag_t addr_tag, uptr untagged_addr) {
uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
bool found_local = false;
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
uptr record = *record_addr;
if (!record)
break;
tag_t base_tag =
reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
uptr pc_mask = (1ULL << kRecordFPShift) - 1;
uptr pc = record & pc_mask;
FrameInfo frame;
if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
for (LocalInfo &local : frame.locals) {
if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
continue;
tag_t obj_tag = base_tag ^ local.tag_offset;
if (obj_tag != addr_tag)
continue;
// Calculate the offset from the object address to the faulting
// address. Because we only store bits 4-19 of FP (bits 0-3 are
// guaranteed to be zero), the calculation is performed mod 2^20 and may
// harmlessly underflow if the address mod 2^20 is below the object
// address.
uptr obj_offset =
(untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
if (obj_offset >= local.size)
continue;
if (!found_local) {
Printf("Potentially referenced stack objects:\n");
found_local = true;
}
Printf(" %s in %s %s:%d\n", local.name, local.function_name,
local.decl_file, local.decl_line);
}
frame.Clear();
}
}
if (found_local)
return;
// We didn't find any locals. Most likely we don't have symbols, so dump
// the information that we have for offline analysis.
InternalScopedString frame_desc(GetPageSizeCached() * 2);
Printf("Previously allocated frames:\n");
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
uptr record = *record_addr;
if (!record)
break;
uptr pc_mask = (1ULL << 48) - 1;
uptr pc = record & pc_mask;
frame_desc.append(" record_addr:0x%zx record:0x%zx",
reinterpret_cast<uptr>(record_addr), record);
if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
RenderFrame(&frame_desc, " %F %L\n", 0, frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
frame->ClearAll();
}
Printf("%s", frame_desc.data());
frame_desc.clear();
}
}
// Returns true if tag == *tag_ptr, reading tags from short granules if
// necessary. This may return a false positive if a tag in the range 1-15 is
// used as a regular tag rather than as a short granule marker.
static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
if (tag == *tag_ptr)
return true;
if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
return false;
uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
return tag == inline_tag;
}
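// Worked example: a 20-byte object occupies one full granule tagged T plus a
// short granule whose shadow byte holds 4 (the number of bytes in use); the
// real tag T lives in the short granule's last byte, which is what the lookup
// above reads.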
// HWASan globals store the size of the global in the descriptor. In cases where
// we don't have a binary with symbols, we can't grab the size of the global
// from the debug info - but we might be able to retrieve it from the
// descriptor. Returns zero if the lookup failed.
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
// Find the ELF object that this global resides in.
Dl_info info;
dladdr(reinterpret_cast<void *>(ptr), &info);
auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);
// Get the load bias. This is normally the same as the dli_fbase address on
// position-independent code, but can be different on non-PIE executables,
// binaries using LLD's partitioning feature, or binaries compiled with a
// linker script.
ElfW(Addr) load_bias = 0;
for (const auto &phdr :
ArrayRef<const ElfW(Phdr)>(phdr_begin, phdr_begin + ehdr->e_phnum)) {
if (phdr.p_type != PT_LOAD || phdr.p_offset != 0)
continue;
load_bias = reinterpret_cast<ElfW(Addr)>(ehdr) - phdr.p_vaddr;
break;
}
// Walk all globals in this ELF object, looking for the one we're interested
// in. Once we find it, we can stop iterating and return the size of the
// global we're interested in.
for (const hwasan_global &global :
HwasanGlobalsFor(load_bias, phdr_begin, ehdr->e_phnum))
if (global.addr() <= ptr && ptr < global.addr() + global.size())
return global.size();
return 0;
}
void PrintAddressDescription(
uptr tagged_addr, uptr access_size,
StackAllocationsRingBuffer *current_stack_allocations) {
Decorator d;
int num_descriptions_printed = 0;
uptr untagged_addr = UntagAddr(tagged_addr);
// Print some very basic information about the address, if it's a heap.
HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
if (uptr beg = chunk.Beg()) {
uptr size = chunk.ActualSize();
Printf("%s[%p,%p) is a %s %s heap chunk; "
"size: %zd offset: %zd\n%s",
d.Location(),
beg, beg + size,
chunk.FromSmallHeap() ? "small" : "large",
chunk.IsAllocated() ? "allocated" : "unallocated",
size, untagged_addr - beg,
d.Default());
}
// Check if this looks like a heap buffer overflow by scanning
// the shadow left and right and looking for the first adjacent
// object with a different memory tag. If that tag matches addr_tag,
// check the allocator if it has a live chunk there.
tag_t addr_tag = GetTagFromPointer(tagged_addr);
tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
for (int i = 0; i < 1000; i++) {
if (TagsEqual(addr_tag, left)) {
candidate = left;
break;
}
--left;
if (TagsEqual(addr_tag, right)) {
candidate = right;
break;
}
++right;
}
if (candidate) {
uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
HwasanChunkView chunk = FindHeapChunkByAddress(mem);
if (chunk.IsAllocated()) {
Printf("%s", d.Location());
Printf("%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
untagged_addr,
candidate == left ? untagged_addr - chunk.End()
: chunk.Beg() - untagged_addr,
candidate == left ? "right" : "left", chunk.UsedSize(),
chunk.Beg(), chunk.End());
Printf("%s", d.Allocation());
Printf("allocated here:\n");
Printf("%s", d.Default());
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
num_descriptions_printed++;
} else {
// Check whether the address points into a loaded library. If so, this is
// most likely a global variable.
const char *module_name;
uptr module_address;
Symbolizer *sym = Symbolizer::GetOrInit();
if (sym->GetModuleNameAndOffsetForPC(mem, &module_name,
&module_address)) {
DataInfo info;
if (sym->SymbolizeData(mem, &info) && info.start) {
Printf(
"%p is located %zd bytes to the %s of %zd-byte global variable "
"%s [%p,%p) in %s\n",
untagged_addr,
candidate == left ? untagged_addr - (info.start + info.size)
: info.start - untagged_addr,
candidate == left ? "right" : "left", info.size, info.name,
info.start, info.start + info.size, module_name);
} else {
uptr size = GetGlobalSizeFromDescriptor(mem);
if (size == 0)
// We couldn't find the size of the global from the descriptors.
Printf(
"%p is located to the %s of a global variable in (%s+0x%x)\n",
untagged_addr, candidate == left ? "right" : "left",
module_name, module_address);
else
Printf(
"%p is located to the %s of a %zd-byte global variable in "
"(%s+0x%x)\n",
untagged_addr, candidate == left ? "right" : "left", size,
module_name, module_address);
}
num_descriptions_printed++;
}
}
}
hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
// Scan all threads' ring buffers to see if this is a heap-use-after-free.
HeapAllocationRecord har;
uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
&ring_index, &num_matching_addrs,
&num_matching_addrs_4b)) {
Printf("%s", d.Location());
Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
har.requested_size, UntagAddr(har.tagged_addr),
UntagAddr(har.tagged_addr) + har.requested_size);
Printf("%s", d.Allocation());
Printf("freed by thread T%zd here:\n", t->unique_id());
Printf("%s", d.Default());
GetStackTraceFromId(har.free_context_id).Print();
Printf("%s", d.Allocation());
Printf("previously allocated here:\n", t);
Printf("%s", d.Default());
GetStackTraceFromId(har.alloc_context_id).Print();
// Print a developer note: the index of this heap object
// in the thread's deallocation ring buffer.
Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
flags()->heap_history_size);
Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
num_matching_addrs_4b);
t->Announce();
num_descriptions_printed++;
}
// Very basic check for stack memory.
if (t->AddrIsInStack(untagged_addr)) {
Printf("%s", d.Location());
Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
t->unique_id());
Printf("%s", d.Default());
t->Announce();
auto *sa = (t == GetCurrentThread() && current_stack_allocations)
? current_stack_allocations
: t->stack_allocations();
PrintStackAllocations(sa, addr_tag, untagged_addr);
num_descriptions_printed++;
}
});
// Print the remaining threads as extra information, one line per thread.
hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
if (!num_descriptions_printed)
// We exhausted our possibilities. Bail out.
Printf("HWAddressSanitizer can not describe address in more detail.\n");
}
void ReportStats() {}
static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
void (*print_tag)(InternalScopedString &s,
tag_t *tag)) {
const uptr row_len = 16; // Should be a power of two.
tag_t *center_row_beg = reinterpret_cast<tag_t *>(
RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
InternalScopedString s(GetPageSizeCached() * 8);
for (tag_t *row = beg_row; row < end_row; row += row_len) {
s.append("%s", row == center_row_beg ? "=>" : " ");
s.append("%p:", row);
for (uptr i = 0; i < row_len; i++) {
s.append("%s", row + i == tag_ptr ? "[" : " ");
print_tag(s, &row[i]);
s.append("%s", row + i == tag_ptr ? "]" : " ");
}
s.append("\n");
}
Printf("%s", s.data());
}
static void PrintTagsAroundAddr(tag_t *tag_ptr) {
Printf(
"Memory tags around the buggy address (one tag corresponds to %zd "
"bytes):\n", kShadowAlignment);
PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
s.append("%02x", *tag);
});
Printf(
"Tags for short granules around the buggy address (one tag corresponds "
"to %zd bytes):\n",
kShadowAlignment);
PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
if (*tag >= 1 && *tag <= kShadowAlignment) {
uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
s.append("%02x",
*reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
} else {
s.append("..");
}
});
Printf(
"See "
"https://clang.llvm.org/docs/"
"HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
"description of short granule tags\n");
}
void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
ScopedReport R(flags()->halt_on_error);
uptr untagged_addr = UntagAddr(tagged_addr);
tag_t ptr_tag = GetTagFromPointer(tagged_addr);
tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
tag_t mem_tag = *tag_ptr;
Decorator d;
Printf("%s", d.Error());
uptr pc = stack->size ? stack->trace[0] : 0;
const char *bug_type = "invalid-free";
Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
untagged_addr, pc);
Printf("%s", d.Access());
Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
Printf("%s", d.Default());
stack->Print();
PrintAddressDescription(tagged_addr, 0, nullptr);
PrintTagsAroundAddr(tag_ptr);
ReportErrorSummary(bug_type, stack);
}
void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
const u8 *expected) {
uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
ScopedReport R(flags()->halt_on_error);
Decorator d;
uptr untagged_addr = UntagAddr(tagged_addr);
Printf("%s", d.Error());
const char *bug_type = "allocation-tail-overwritten";
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
Printf("\n%s", d.Default());
stack->Print();
HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
if (chunk.Beg()) {
Printf("%s", d.Allocation());
Printf("allocated here:\n");
Printf("%s", d.Default());
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
}
InternalScopedString s(GetPageSizeCached() * 8);
CHECK_GT(tail_size, 0U);
CHECK_LT(tail_size, kShadowAlignment);
u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
s.append("Tail contains: ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
s.append(".. ");
for (uptr i = 0; i < tail_size; i++)
s.append("%02x ", tail[i]);
s.append("\n");
s.append("Expected: ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
s.append(".. ");
for (uptr i = 0; i < tail_size; i++)
s.append("%02x ", expected[i]);
s.append("\n");
s.append(" ");
for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
s.append(" ");
for (uptr i = 0; i < tail_size; i++)
s.append("%s ", expected[i] != tail[i] ? "^^" : " ");
s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
"to the right of a heap object, but within the %zd-byte granule, e.g.\n"
" char *x = new char[20];\n"
" x[25] = 42;\n"
"%s does not detect such bugs in uninstrumented code at the time of write,"
"\nbut can detect them at the time of free/delete.\n"
"To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
kShadowAlignment, SanitizerToolName);
Printf("%s", s.data());
GetCurrentThread()->Announce();
tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
PrintTagsAroundAddr(tag_ptr);
ReportErrorSummary(bug_type, stack);
}
void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
bool is_store, bool fatal, uptr *registers_frame) {
ScopedReport R(fatal);
SavedStackAllocations current_stack_allocations(
GetCurrentThread()->stack_allocations());
Decorator d;
Printf("%s", d.Error());
uptr untagged_addr = UntagAddr(tagged_addr);
// TODO: when possible, try to print heap-use-after-free, etc.
const char *bug_type = "tag-mismatch";
uptr pc = stack->size ? stack->trace[0] : 0;
Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
untagged_addr, pc);
Thread *t = GetCurrentThread();
sptr offset =
__hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
tag_t ptr_tag = GetTagFromPointer(tagged_addr);
tag_t *tag_ptr =
reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
tag_t mem_tag = *tag_ptr;
Printf("%s", d.Access());
Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
mem_tag, t->unique_id());
if (offset != 0)
Printf("Invalid access starting at offset [%zu, %zu)\n", offset,
Min(access_size, static_cast<uptr>(offset) + (1 << kShadowScale)));
Printf("%s", d.Default());
stack->Print();
PrintAddressDescription(tagged_addr, access_size,
current_stack_allocations.get());
t->Announce();
PrintTagsAroundAddr(tag_ptr);
if (registers_frame)
ReportRegisters(registers_frame, pc);
ReportErrorSummary(bug_type, stack);
}
// See the frame breakdown defined in __hwasan_tag_mismatch (from
// hwasan_tag_mismatch_aarch64.S).
void ReportRegisters(uptr *frame, uptr pc) {
Printf("Registers where the failure occurred (pc %p):\n", pc);
// We explicitly print a single line (4 registers/line) per Printf() call to
// reduce the number of logcat error messages printed. Each Printf() results
// in a new logcat line, irrespective of whether a newline is present, so we
// want as few Printf() calls as possible.
Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
frame[0], frame[1], frame[2], frame[3]);
Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
frame[4], frame[5], frame[6], frame[7]);
Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
frame[8], frame[9], frame[10], frame[11]);
Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
frame[12], frame[13], frame[14], frame[15]);
Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
frame[16], frame[17], frame[18], frame[19]);
Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
frame[20], frame[21], frame[22], frame[23]);
Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
frame[24], frame[25], frame[26], frame[27]);
Printf(" x28 %016llx x29 %016llx x30 %016llx\n",
frame[28], frame[29], frame[30]);
}
} // namespace __hwasan

View File

@ -0,0 +1,35 @@
//===-- hwasan_report.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file is a part of HWAddressSanitizer. HWASan-private header for error
/// reporting functions.
///
//===----------------------------------------------------------------------===//
#ifndef HWASAN_REPORT_H
#define HWASAN_REPORT_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
namespace __hwasan {
void ReportStats();
void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
bool is_store, bool fatal, uptr *registers_frame);
void ReportInvalidFree(StackTrace *stack, uptr addr);
void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
const u8 *expected);
void ReportRegisters(uptr *registers_frame, uptr pc);
void ReportAtExitStatistics();
} // namespace __hwasan
#endif // HWASAN_REPORT_H

View File

@ -0,0 +1,100 @@
//===-- hwasan_setjmp.S --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// HWAddressSanitizer runtime.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_asm.h"
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
#include "sanitizer_common/sanitizer_platform.h"
// We want to save the context of the calling function.
// That requires
// 1) No modification of the link register by this function.
// 2) No modification of the stack pointer by this function.
// 3) (no modification of any other saved register, but that's not really going
// to occur, and hence isn't as much of a worry).
//
// There's essentially no way to ensure that the compiler will not modify the
// stack pointer when compiling a C function.
// Hence we have to write this function in assembly.
.section .text
.file "hwasan_setjmp.S"
.global __interceptor_setjmp
ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
CFI_STARTPROC
mov x1, #0
b __interceptor_sigsetjmp
CFI_ENDPROC
ASM_SIZE(__interceptor_setjmp)
#if SANITIZER_ANDROID
// Bionic also defines a function `setjmp` that calls `sigsetjmp` saving the
// current signal mask.
.global __interceptor_setjmp_bionic
ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
__interceptor_setjmp_bionic:
CFI_STARTPROC
mov x1, #1
b __interceptor_sigsetjmp
CFI_ENDPROC
ASM_SIZE(__interceptor_setjmp_bionic)
#endif
.global __interceptor_sigsetjmp
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
__interceptor_sigsetjmp:
CFI_STARTPROC
stp x19, x20, [x0, #0<<3]
stp x21, x22, [x0, #2<<3]
stp x23, x24, [x0, #4<<3]
stp x25, x26, [x0, #6<<3]
stp x27, x28, [x0, #8<<3]
stp x29, x30, [x0, #10<<3]
stp d8, d9, [x0, #14<<3]
stp d10, d11, [x0, #16<<3]
stp d12, d13, [x0, #18<<3]
stp d14, d15, [x0, #20<<3]
mov x2, sp
str x2, [x0, #13<<3]
// We always have the second argument to __sigjmp_save (savemask) set, since
// the _setjmp function above has set it for us as `false`.
// This function is defined in hwasan_interceptors.cc
b __sigjmp_save
CFI_ENDPROC
ASM_SIZE(__interceptor_sigsetjmp)
.macro ALIAS first second
.globl \second
.equ \second\(), \first
.endm
#if SANITIZER_ANDROID
ALIAS __interceptor_sigsetjmp, sigsetjmp
.weak sigsetjmp
ALIAS __interceptor_setjmp_bionic, setjmp
.weak setjmp
#else
ALIAS __interceptor_sigsetjmp, __sigsetjmp
.weak __sigsetjmp
#endif
ALIAS __interceptor_setjmp, _setjmp
.weak _setjmp
#endif
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE

View File

@ -0,0 +1,152 @@
#include "sanitizer_common/sanitizer_asm.h"
// The content of this file is AArch64-only:
#if defined(__aarch64__)
// The responsibility of the HWASan entry point in compiler-rt is primarily to
// readjust the stack from the callee and save the current register values to
// the stack.
// This entry point function should be called from a __hwasan_check_* symbol.
// These are generated during a lowering pass in the backend, and are found in
// AArch64AsmPrinter::EmitHwasanMemaccessSymbols(). Please look there for
// further information.
// The __hwasan_check_* caller of this function should have expanded the stack
// and saved the previous values of x0, x1, x29, and x30. This function will
// "consume" these saved values and treats it as part of its own stack frame.
// In this sense, the __hwasan_check_* callee and this function "share" a stack
// frame. This allows us to omit having unwinding information (.cfi_*) present
// in every __hwasan_check_* function, therefore reducing binary size. This is
// particularly important as hwasan_check_* instances are duplicated in every
// translation unit where HWASan is enabled.
// This function calls HwasanTagMismatch to step back into the C++ code that
// completes the stack unwinding and error printing. This function is not
// permitted to return.
// Frame from __hwasan_check_:
// | ... |
// | ... |
// | Previous stack frames... |
// +=================================+
// | Unused 8-bytes for maintaining |
// | 16-byte SP alignment. |
// +---------------------------------+
// | Return address (x30) for caller |
// | of __hwasan_check_*. |
// +---------------------------------+
// | Frame address (x29) for caller |
// | of __hwasan_check_* |
// +---------------------------------+ <-- [SP + 232]
// | ... |
// | |
// | Stack frame space for x2 - x28. |
// | |
// | ... |
// +---------------------------------+ <-- [SP + 16]
// | |
// | Saved x1, as __hwasan_check_* |
// | clobbers it. |
// +---------------------------------+
// | Saved x0, likewise above. |
// +---------------------------------+ <-- [x30 / SP]
// This function takes two arguments:
// * x0: The data address.
// * x1: The encoded access info for the failing access.
// This function has two entry points. The first, __hwasan_tag_mismatch, is used
// by clients that were compiled without short tag checks (i.e. binaries built
// by older compilers and binaries targeting older runtimes). In this case the
// outlined tag check will be missing the code handling short tags (which won't
// be used in the binary's own stack variables but may be used on the heap
// or stack variables in other binaries), so the check needs to be done here.
//
// The second, __hwasan_tag_mismatch_v2, is used by binaries targeting newer
// runtimes. This entry point bypasses the short tag check since it will have
// already been done as part of the outlined tag check. Since tag mismatches are
// uncommon, there isn't a significant performance benefit to being able to
// bypass the check; the main benefits are that we can sometimes avoid
// clobbering the x17 register in error reports, and that the program will have
// a runtime dependency on the __hwasan_tag_mismatch_v2 symbol, so it will
// fail to start up given an older (i.e. incompatible) runtime.
.section .text
.file "hwasan_tag_mismatch_aarch64.S"
.global __hwasan_tag_mismatch
.type __hwasan_tag_mismatch, %function
__hwasan_tag_mismatch:
// Compute the granule position one past the end of the access.
mov x16, #1
and x17, x1, #0xf
lsl x16, x16, x17
and x17, x0, #0xf
add x17, x16, x17
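// For example, a 4-byte access (x1 & 0xf == 2) to an address whose low
// nibble is 0xc gives x16 = 1 << 2 = 4 and x17 = 4 + 12 = 16, the granule
// position one past the end of the access.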
// Load the shadow byte again and check whether it is a short tag within the
// range of the granule position computed above.
ubfx x16, x0, #4, #52
ldrb w16, [x9, x16]
cmp w16, #0xf
b.hi __hwasan_tag_mismatch_v2
cmp w16, w17
b.lo __hwasan_tag_mismatch_v2
// Load the real tag from the last byte of the granule and compare against
// the pointer tag.
orr x16, x0, #0xf
ldrb w16, [x16]
cmp x16, x0, lsr #56
b.ne __hwasan_tag_mismatch_v2
// Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch
// call and resume execution.
ldp x0, x1, [sp], #256
ret
.global __hwasan_tag_mismatch_v2
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
CFI_STARTPROC
// Set the CFA to be the return address for caller of __hwasan_check_*. Note
// that we do not emit CFI predicates to describe the contents of this stack
// frame, as this proxy entry point should never be debugged. The contents
// are static and are handled by the unwinder after calling
// __hwasan_tag_mismatch. The frame pointer is already correctly set up
// by __hwasan_check_*.
add x29, sp, #232
CFI_DEF_CFA(w29, 24)
CFI_OFFSET(w30, -16)
CFI_OFFSET(w29, -24)
// Save the rest of the registers into the preallocated space left by
// __hwasan_check.
str x28, [sp, #224]
stp x26, x27, [sp, #208]
stp x24, x25, [sp, #192]
stp x22, x23, [sp, #176]
stp x20, x21, [sp, #160]
stp x18, x19, [sp, #144]
stp x16, x17, [sp, #128]
stp x14, x15, [sp, #112]
stp x12, x13, [sp, #96]
stp x10, x11, [sp, #80]
stp x8, x9, [sp, #64]
stp x6, x7, [sp, #48]
stp x4, x5, [sp, #32]
stp x2, x3, [sp, #16]
// Pass the address of the frame to __hwasan_tag_mismatch4, so that it can
// extract the saved registers from this frame without having to worry about
// finding this frame.
mov x2, sp
bl __hwasan_tag_mismatch4
CFI_ENDPROC
.Lfunc_end0:
.size __hwasan_tag_mismatch, .Lfunc_end0-__hwasan_tag_mismatch
#endif // defined(__aarch64__)
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE

View File

@ -0,0 +1,133 @@
#include "hwasan.h"
#include "hwasan_mapping.h"
#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
#include "hwasan_interface_internal.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
namespace __hwasan {
static u32 RandomSeed() {
u32 seed;
do {
if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
/*blocking=*/false))) {
seed = static_cast<u32>(
(NanoTime() >> 12) ^
(reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
}
} while (!seed);
return seed;
}
void Thread::InitRandomState() {
random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
// Push a random number of zeros onto the ring buffer so that the first stack
// tag base will be random.
for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
stack_allocations_->push(0);
}
void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
static u64 unique_id;
unique_id_ = unique_id++;
if (auto sz = flags()->heap_history_size)
heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
HwasanTSDThreadInit(); // Only needed with interceptors.
uptr *ThreadLong = GetCurrentThreadLongPtr();
// The following implicitly sets (this) as the current thread.
stack_allocations_ = new (ThreadLong)
StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
// Check that it worked.
CHECK_EQ(GetCurrentThread(), this);
// ScopedTaggingDisable needs GetCurrentThread to be set up.
ScopedTaggingDisabler disabler;
uptr tls_size;
uptr stack_size;
GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
&tls_size);
stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
if (stack_bottom_) {
int local;
CHECK(AddrIsInStack((uptr)&local));
CHECK(MemIsApp(stack_bottom_));
CHECK(MemIsApp(stack_top_ - 1));
}
if (flags()->verbose_threads) {
if (IsMainThread()) {
Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
sizeof(Thread), heap_allocations_->SizeInBytes(),
stack_allocations_->size() * sizeof(uptr));
}
Print("Creating : ");
}
}
void Thread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_)
TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
}
void Thread::Destroy() {
if (flags()->verbose_threads)
Print("Destroying: ");
AllocatorSwallowThreadLocalCache(allocator_cache());
ClearShadowForThreadStackAndTLS();
if (heap_allocations_)
heap_allocations_->Delete();
DTLS_Destroy();
// Unregister this as the current thread.
// Instrumented code can not run on this thread from this point onwards, but
// malloc/free can still be served. Glibc may call free() very late, after all
// TSD destructors are done.
CHECK_EQ(GetCurrentThread(), this);
*GetCurrentThreadLongPtr() = 0;
}
void Thread::Print(const char *Prefix) {
Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix,
unique_id_, this, stack_bottom(), stack_top(),
stack_top() - stack_bottom(),
tls_begin(), tls_end());
}
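// Classic 32-bit xorshift step (Marsaglia, 2003); the (13, 17, 5) shift
// triple yields a full-period sequence of 2^32 - 1 non-zero states.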
static u32 xorshift(u32 state) {
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
return state;
}
// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag() {
if (tagging_disabled_) return 0;
tag_t tag;
do {
if (flags()->random_tags) {
if (!random_buffer_)
random_buffer_ = random_state_ = xorshift(random_state_);
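// One 32-bit xorshift output is consumed a byte at a time, so a single
// state advance supplies up to four candidate tags; zero bytes are skipped
// by the surrounding loop so a zero tag is never returned from here.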
CHECK(random_buffer_);
tag = random_buffer_ & 0xFF;
random_buffer_ >>= 8;
} else {
tag = random_state_ = (random_state_ + 1) & 0xFF;
}
} while (!tag);
return tag;
}
} // namespace __hwasan

View File

@ -0,0 +1,98 @@
//===-- hwasan_thread.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef HWASAN_THREAD_H
#define HWASAN_THREAD_H
#include "hwasan_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
namespace __hwasan {
typedef __sanitizer::CompactRingBuffer<uptr> StackAllocationsRingBuffer;
class Thread {
public:
void Init(uptr stack_buffer_start, uptr stack_buffer_size); // Must be called from the thread itself.
void InitRandomState();
void Destroy();
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
uptr stack_size() { return stack_top() - stack_bottom(); }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
bool IsMainThread() { return unique_id_ == 0; }
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
AllocatorCache *allocator_cache() { return &allocator_cache_; }
HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
tag_t GenerateRandomTag();
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
u64 unique_id() const { return unique_id_; }
void Announce() {
if (announced_) return;
announced_ = true;
Print("Thread: ");
}
uptr &vfork_spill() { return vfork_spill_; }
private:
// NOTE: There is no Thread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
void ClearShadowForThreadStackAndTLS();
void Print(const char *prefix);
uptr vfork_spill_;
uptr stack_top_;
uptr stack_bottom_;
uptr tls_begin_;
uptr tls_end_;
u32 random_state_;
u32 random_buffer_;
AllocatorCache allocator_cache_;
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
Thread *next_; // All live threads form a linked list.
u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
bool announced_;
friend struct ThreadListHead;
};
Thread *GetCurrentThread();
uptr *GetCurrentThreadLongPtr();
struct ScopedTaggingDisabler {
ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
};
} // namespace __hwasan
#endif // HWASAN_THREAD_H

View File

@ -0,0 +1,15 @@
#include "hwasan_thread_list.h"
namespace __hwasan {
static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
static HwasanThreadList *hwasan_thread_list;
HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
void InitThreadList(uptr storage, uptr size) {
CHECK(hwasan_thread_list == nullptr);
hwasan_thread_list =
new (thread_list_placeholder) HwasanThreadList(storage, size);
}
} // namespace __hwasan

View File

@ -0,0 +1,215 @@
//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
// HwasanThreadList is a registry for live threads, as well as an allocator for
// Thread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within (2**kShadowBaseAlignment)
// sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
// aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
// A_next = (A + sizeof(uptr)) & ~(1 << (N + 12))
// is the address of the next element of that ring buffer (with wrap-around).
// And, with K = kShadowBaseAlignment,
// S = (A | ((1 << K) - 1)) + 1
// (align up to kShadowBaseAlignment) is the start of the shadow region.
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
// (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored in
// said thread-local word.
//
// Note the unusual way of aligning up the address of the shadow:
// (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
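// Worked example (hypothetical addresses): with N = 1 the ring buffer is
// 2**13 = 8192 bytes, aligned to 16384, say at B = 0x10000, i.e. occupying
// [0x10000, 0x12000). An interior element A = 0x10ff8 advances to
// (0x10ff8 + 8) & ~(1 << 13) = 0x11000,
// and the last element A = 0x11ff8 wraps to
// (0x11ff8 + 8) & ~(1 << 13) = 0x10000 = B.
// The packed thread-local word for A = 0x10ff8 is
// (1 << (1 + 56)) | 0x10ff8 = 0x0200000000010ff8.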
#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
#include "sanitizer_common/sanitizer_placement_new.h"
namespace __hwasan {
static uptr RingBufferSize() {
uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
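// E.g., if stack_history_size is 1024, desired_bytes is 8192 on a 64-bit
// target, and the loop below returns 4096 * 2 = 8192 (N = 1).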
// FIXME: increase the limit to 8 once this bug is fixed:
// https://bugs.llvm.org/show_bug.cgi?id=39030
for (int shift = 1; shift < 7; ++shift) {
uptr size = 4096 * (1ULL << shift);
if (size >= desired_bytes)
return size;
}
Printf("stack history size too large: %d\n", flags()->stack_history_size);
CHECK(0);
return 0;
}
struct ThreadListHead {
Thread *list_;
ThreadListHead() : list_(nullptr) {}
void Push(Thread *t) {
t->next_ = list_;
list_ = t;
}
Thread *Pop() {
Thread *t = list_;
if (t)
list_ = t->next_;
return t;
}
void Remove(Thread *t) {
Thread **cur = &list_;
while (*cur && *cur != t) cur = &(*cur)->next_;
CHECK(*cur && "thread not found");
*cur = (*cur)->next_;
}
template <class CB>
void ForEach(CB cb) {
Thread *t = list_;
while (t) {
cb(t);
t = t->next_;
}
}
};
struct ThreadStats {
uptr n_live_threads;
uptr total_stack_size;
};
class HwasanThreadList {
public:
HwasanThreadList(uptr storage, uptr size)
: free_space_(storage), free_space_end_(storage + size) {
// [storage, storage + size) is used as a vector of
// thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
// Each element contains
// * a ring buffer at offset 0,
// * a Thread object at offset ring_buffer_size_.
ring_buffer_size_ = RingBufferSize();
thread_alloc_size_ =
RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
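// E.g. (hypothetical sizes): with ring_buffer_size_ = 8192 and
// sizeof(Thread) below 8192, thread_alloc_size_ becomes
// RoundUpTo(8192 + sizeof(Thread), 16384) = 16384: the ring buffer fills
// the first half of the slot and the Thread object sits right after it.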
}
Thread *CreateCurrentThread() {
Thread *t;
{
SpinMutexLock l(&list_mutex_);
t = free_list_.Pop();
if (t) {
uptr start = (uptr)t - ring_buffer_size_;
internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
} else {
t = AllocThread();
}
live_list_.Push(t);
}
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t);
return t;
}
void DontNeedThread(Thread *t) {
uptr start = (uptr)t - ring_buffer_size_;
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
SpinMutexLock l(&list_mutex_);
live_list_.Remove(t);
free_list_.Push(t);
DontNeedThread(t);
}
Thread *GetThreadByBufferAddress(uptr p) {
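// Each slot is 2 * ring_buffer_size_ aligned and the Thread object sits
// directly after its ring buffer, so rounding a buffer address down
// recovers the slot base.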
return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
ring_buffer_size_);
}
uptr MemoryUsedPerThread() {
uptr res = sizeof(Thread) + ring_buffer_size_;
if (auto sz = flags()->heap_history_size)
res += HeapAllocationsRingBuffer::SizeInBytes(sz);
return res;
}
template <class CB>
void VisitAllLiveThreads(CB cb) {
SpinMutexLock l(&list_mutex_);
live_list_.ForEach(cb);
}
void AddThreadStats(Thread *t) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads++;
stats_.total_stack_size += t->stack_size();
}
void RemoveThreadStats(Thread *t) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads--;
stats_.total_stack_size -= t->stack_size();
}
ThreadStats GetThreadStats() {
SpinMutexLock l(&stats_mutex_);
return stats_;
}
private:
Thread *AllocThread() {
uptr align = ring_buffer_size_ * 2;
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
free_space_ += thread_alloc_size_;
CHECK(free_space_ <= free_space_end_ && "out of thread memory");
return t;
}
uptr free_space_;
uptr free_space_end_;
uptr ring_buffer_size_;
uptr thread_alloc_size_;
ThreadListHead free_list_;
ThreadListHead live_list_;
SpinMutex list_mutex_;
ThreadStats stats_;
SpinMutex stats_mutex_;
};
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
} // namespace __hwasan

View File

@ -0,0 +1,25 @@
//===-- hwasan_type_test.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Compile-time tests of the internal type definitions.
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "hwasan.h"
#include <setjmp.h>
#define CHECK_TYPE_SIZE_FITS(TYPE) \
COMPILER_CHECK(sizeof(__hw_##TYPE) <= sizeof(TYPE))
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
CHECK_TYPE_SIZE_FITS(jmp_buf);
CHECK_TYPE_SIZE_FITS(sigjmp_buf);
#endif

View File

@ -71,6 +71,7 @@ merge lib/tsan/rtl tsan
merge lib/sanitizer_common sanitizer_common
merge lib/interception interception
merge lib/ubsan ubsan
merge lib/hwasan hwasan
# Need to merge lib/builtins/assembly.h file:
mkdir -p builtins