All source files: Merge from upstream 285547.
libsanitizer:

	* All source files: Merge from upstream 285547.
	* configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New
	variable.
	* configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
	* asan/Makefile.am (asan_files): Add new files.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.in: Likewise.
	* lsan/Makefile.in: Likewise.
	* tsan/Makefile.am (tsan_files): Add new files.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	(EXTRA_libsanitizer_common_la_SOURCES): Define.
	(libsanitizer_common_la_LIBADD): Likewise.
	(libsanitizer_common_la_DEPENDENCIES): Likewise.
	* sanitizer_common/Makefile.in: Regenerate.
	* interception/Makefile.in: Likewise.
	* libbacktrace/Makefile.in: Likewise.
	* Makefile.in: Likewise.
	* configure: Likewise.
	* merge.sh: Handle builtins/assembly.h merging.
	* builtins/assembly.h: New file.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r241977

parent f31d9224e6
commit 1018981977
@@ -1,3 +1,28 @@
+2016-11-09  Maxim Ostapenko  <m.ostapenko@samsung.com>
+
+	* All source files: Merge from upstream 285547.
+	* configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New
+	variable.
+	* configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
+	* asan/Makefile.am (asan_files): Add new files.
+	* asan/Makefile.in: Regenerate.
+	* ubsan/Makefile.in: Likewise.
+	* lsan/Makefile.in: Likewise.
+	* tsan/Makefile.am (tsan_files): Add new files.
+	* tsan/Makefile.in: Regenerate.
+	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
+	(EXTRA_libsanitizer_common_la_SOURCES): Define.
+	(libsanitizer_common_la_LIBADD): Likewise.
+	(libsanitizer_common_la_DEPENDENCIES): Likewise.
+	* sanitizer_common/Makefile.in: Regenerate.
+	* interception/Makefile.in: Likewise.
+	* libbacktrace/Makefile.in: Likewise.
+	* Makefile.in: Likewise.
+	* configure: Likewise.
+	* merge.sh: Handle builtins/assembly.h merging.
+	* builtins/assembly.h: New file.
+	* asan/libtool-version: Bump the libasan SONAME.
+
 2016-09-21  Jakub Jelinek  <jakub@redhat.com>
 
 	PR sanitizer/77567
@@ -1,4 +1,4 @@
-253555
+285547
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -210,6 +210,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
 PATH_SEPARATOR = @PATH_SEPARATOR@
 RANLIB = @RANLIB@
 RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
 SED = @SED@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
@@ -19,6 +19,8 @@ asan_files = \
 	asan_activation.cc \
 	asan_allocator.cc \
 	asan_debugging.cc \
+	asan_descriptions.cc \
+	asan_errors.cc \
 	asan_fake_stack.cc \
 	asan_flags.cc \
 	asan_globals.cc \
@@ -28,6 +30,7 @@ asan_files = \
 	asan_malloc_linux.cc \
 	asan_malloc_mac.cc \
 	asan_malloc_win.cc \
+	asan_memory_profile.cc \
 	asan_new_delete.cc \
 	asan_poisoning.cc \
 	asan_posix.cc \
@@ -112,9 +112,10 @@ libasan_la_DEPENDENCIES = \
 	$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
 	$(am__append_3) $(am__DEPENDENCIES_1)
 am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
-	asan_fake_stack.lo asan_flags.lo asan_globals.lo \
-	asan_interceptors.lo asan_linux.lo asan_mac.lo \
-	asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
+	asan_descriptions.lo asan_errors.lo asan_fake_stack.lo \
+	asan_flags.lo asan_globals.lo asan_interceptors.lo \
+	asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
+	asan_malloc_mac.lo asan_malloc_win.lo asan_memory_profile.lo \
 	asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
 	asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
 	asan_suppressions.lo asan_thread.lo asan_win.lo \
@@ -219,6 +220,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
 PATH_SEPARATOR = @PATH_SEPARATOR@
 RANLIB = @RANLIB@
 RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
 SED = @SED@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
@@ -308,6 +310,8 @@ asan_files = \
 	asan_activation.cc \
 	asan_allocator.cc \
 	asan_debugging.cc \
+	asan_descriptions.cc \
+	asan_errors.cc \
 	asan_fake_stack.cc \
 	asan_flags.cc \
 	asan_globals.cc \
@@ -317,6 +321,7 @@ asan_files = \
 	asan_malloc_linux.cc \
 	asan_malloc_mac.cc \
 	asan_malloc_win.cc \
+	asan_memory_profile.cc \
 	asan_new_delete.cc \
 	asan_poisoning.cc \
 	asan_posix.cc \
@@ -454,6 +459,8 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_descriptions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_errors.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@@ -463,6 +470,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_linux.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_memory_profile.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_new_delete.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_poisoning.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_posix.Plo@am__quote@
@@ -45,6 +45,7 @@ static struct AsanDeactivatedFlags {
     FlagParser parser;
     RegisterActivationFlags(&parser, &f, &cf);
 
+    cf.SetDefaults();
     // Copy the current activation flags.
     allocator_options.CopyTo(&f, &cf);
     cf.malloc_context_size = malloc_context_size;
@@ -59,12 +60,7 @@ static struct AsanDeactivatedFlags {
       parser.ParseString(env);
     }
 
-    // Override from getprop asan.options.
-    char buf[100];
-    GetExtraActivationFlags(buf, sizeof(buf));
-    parser.ParseString(buf);
-
     SetVerbosity(cf.verbosity);
+    InitializeCommonFlags(&cf);
 
     if (Verbosity()) ReportUnrecognizedFlags();
@@ -221,7 +221,7 @@ void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
 
 struct Allocator {
   static const uptr kMaxAllowedMallocSize =
-      FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
+      FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
   static const uptr kMaxThreadLocalQuarantine =
       FIRST_32_SECOND_64(1 << 18, 1 << 20);
 
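The one-character change above is easy to misread. On LP64 Linux targets 1UL << 40 is fine, but on LLP64 targets such as 64-bit Windows, unsigned long is only 32 bits wide, so a 40-bit shift over-shifts and is undefined behavior; unsigned long long is guaranteed to be at least 64 bits everywhere. The standalone C++ sketch below is not part of the commit, and the LLP64 motivation is our reading of the change rather than something the commit states:

#include <cstdint>
#include <iostream>

int main() {
  // LP64 (Linux/macOS): unsigned long is 64 bits, 1UL << 40 is fine.
  // LLP64 (64-bit Windows): unsigned long is 32 bits, so 1UL << 40
  // over-shifts (undefined behavior). unsigned long long is at least
  // 64 bits on every conforming platform.
  std::cout << "sizeof(unsigned long)      = " << sizeof(unsigned long) << "\n";
  std::cout << "sizeof(unsigned long long) = " << sizeof(unsigned long long) << "\n";
  std::uint64_t max_malloc = 1ULL << 40;  // always well-defined: 1 TiB
  std::cout << "1ULL << 40 = " << max_malloc << "\n";
  return 0;
}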
@@ -264,9 +264,43 @@ struct Allocator {
     SharedInitCode(options);
   }
 
+  void RePoisonChunk(uptr chunk) {
+    // This could be a user-facing chunk (with redzones), or some internal
+    // housekeeping chunk, like TransferBatch. Start by assuming the former.
+    AsanChunk *ac = GetAsanChunk((void *)chunk);
+    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
+    uptr beg = ac->Beg();
+    uptr end = ac->Beg() + ac->UsedSize(true);
+    uptr chunk_end = chunk + allocated_size;
+    if (chunk < beg && beg < end && end <= chunk_end) {
+      // Looks like a valid AsanChunk. Or maybe not. Be conservative and only
+      // poison the redzones.
+      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+      FastPoisonShadowPartialRightRedzone(
+          end_aligned_down, end - end_aligned_down,
+          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+    } else {
+      // This cannot be an AsanChunk. Poison everything. It may be reused as
+      // an AsanChunk later.
+      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+    }
+  }
+
   void ReInitialize(const AllocatorOptions &options) {
     allocator.SetMayReturnNull(options.may_return_null);
     SharedInitCode(options);
+
+    // Poison all existing allocations' redzones.
+    if (CanPoisonMemory()) {
+      allocator.ForceLock();
+      allocator.ForEachChunk(
+          [](uptr chunk, void *alloc) {
+            ((Allocator *)alloc)->RePoisonChunk(chunk);
+          },
+          this);
+      allocator.ForceUnlock();
+    }
   }
 
   void GetOptions(AllocatorOptions *options) const {
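The ForEachChunk call above relies on a standard C++ idiom: a capture-less lambda converts implicitly to a plain function pointer, so it can cross a C-style callback interface, with per-call state (here `this`) smuggled through the void * argument. A self-contained sketch of the same pattern; FakeAllocator and Repoisoner are hypothetical stand-ins for the real allocator types, not part of the commit:

#include <cstdio>
#include <vector>

// C-style iteration interface: callback plus opaque state pointer.
using ChunkCallback = void (*)(unsigned long chunk, void *arg);

struct FakeAllocator {
  std::vector<unsigned long> chunks{0x1000, 0x2000, 0x3000};
  void ForEachChunk(ChunkCallback cb, void *arg) {
    for (unsigned long c : chunks) cb(c, arg);
  }
};

struct Repoisoner {
  int visited = 0;
  void RePoisonChunk(unsigned long chunk) {
    ++visited;
    std::printf("re-poisoning chunk at %#lx\n", chunk);
  }
};

int main() {
  FakeAllocator allocator;
  Repoisoner r;
  // The capture-less lambda decays to a ChunkCallback; `&r` plays the
  // role of `this` in the ReInitialize code above.
  allocator.ForEachChunk(
      [](unsigned long chunk, void *arg) {
        static_cast<Repoisoner *>(arg)->RePoisonChunk(chunk);
      },
      &r);
  std::printf("visited %d chunks\n", r.visited);
  return 0;
}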
@@ -354,7 +388,7 @@ struct Allocator {
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
              (void*)size);
-      return allocator.ReturnNullOrDie();
+      return allocator.ReturnNullOrDieOnBadRequest();
     }
 
     AsanThread *t = GetCurrentThread();
@@ -371,8 +405,7 @@ struct Allocator {
           allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
     }
 
-    if (!allocated)
-      return allocator.ReturnNullOrDie();
+    if (!allocated) return allocator.ReturnNullOrDieOnOOM();
 
     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
       // Heap poisoning is enabled, but the allocator provides an unpoisoned
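The two hunks above split the old catch-all ReturnNullOrDie into distinct paths for an impossible request (oversized or overflowing size computation) versus a genuine out-of-memory condition, so an allocator_may_return_null-style policy can treat them differently. A minimal sketch of that split, with illustrative names rather than ASan's real internals:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

static bool may_return_null = true;

void *ReturnNullOrDieOnBadRequest() {
  if (may_return_null) return nullptr;
  std::fprintf(stderr, "allocator: bad request\n");
  std::abort();
}

void *ReturnNullOrDieOnOOM() {
  if (may_return_null) return nullptr;
  std::fprintf(stderr, "allocator: out of memory\n");
  std::abort();
}

void *Allocate(std::size_t size) {
  const unsigned long long kMaxAllowed = 1ULL << 40;
  if (size > kMaxAllowed) return ReturnNullOrDieOnBadRequest();  // bad request
  if (void *p = std::malloc(size)) return p;
  return ReturnNullOrDieOnOOM();  // real OOM
}

int main() {
  std::printf("huge request  -> %p\n", Allocate((std::size_t)1 << 41));
  std::printf("small request -> %p\n", Allocate(64));
  return 0;
}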
@@ -455,29 +488,28 @@ struct Allocator {
     return res;
   }
 
-  void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
-                                   BufferedStackTrace *stack) {
+  // Set quarantine flag if chunk is allocated, issue ASan error report on
+  // available and quarantined chunks. Return true on success, false otherwise.
+  bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
+                                              BufferedStackTrace *stack) {
     u8 old_chunk_state = CHUNK_ALLOCATED;
     // Flip the chunk_state atomically to avoid race on double-free.
-    if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
-                                        CHUNK_QUARANTINE, memory_order_acquire))
+    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+                                        CHUNK_QUARANTINE,
+                                        memory_order_acquire)) {
       ReportInvalidFree(ptr, old_chunk_state, stack);
+      // It's not safe to push a chunk in quarantine on invalid free.
+      return false;
+    }
     CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+    return true;
   }
 
   // Expects the chunk to already be marked as quarantined by using
-  // AtomicallySetQuarantineFlag.
+  // AtomicallySetQuarantineFlagIfAllocated.
   void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                        AllocType alloc_type) {
     CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
 
     if (m->alloc_type != alloc_type) {
       if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
         ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                 (AllocType)alloc_type);
       }
     }
 
     CHECK_GE(m->alloc_tid, 0);
     if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
       CHECK_EQ(m->free_tid, kInvalidTid);
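The rewritten helper above turns double-free detection into a single atomic compare-exchange: exactly one of two racing frees flips the chunk from CHUNK_ALLOCATED to CHUNK_QUARANTINE, and the loser observes the stale state and reports an invalid free. A runnable sketch of the idiom, using illustrative names rather than ASan's internal types:

#include <atomic>
#include <cstdio>

enum ChunkState : unsigned char {
  CHUNK_AVAILABLE,   // never allocated, or returned to the allocator
  CHUNK_ALLOCATED,   // currently live
  CHUNK_QUARANTINE,  // freed, held in quarantine
};

bool TryQuarantine(std::atomic<unsigned char> &state) {
  unsigned char expected = CHUNK_ALLOCATED;
  if (!state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                     std::memory_order_acquire)) {
    // `expected` now holds the actual state: QUARANTINE means a
    // double-free, AVAILABLE means the pointer was never live.
    std::printf("invalid free, chunk state was %d\n", expected);
    return false;
  }
  return true;
}

int main() {
  std::atomic<unsigned char> state{CHUNK_ALLOCATED};
  std::printf("first free:  %s\n", TryQuarantine(state) ? "ok" : "reported");
  std::printf("second free: %s\n", TryQuarantine(state) ? "ok" : "reported");
  return 0;
}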
@@ -514,13 +546,24 @@ struct Allocator {
 
     uptr chunk_beg = p - kChunkHeaderSize;
     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
 
+    ASAN_FREE_HOOK(ptr);
+    // Must mark the chunk as quarantined before any changes to its metadata.
+    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
+    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
+
+    if (m->alloc_type != alloc_type) {
+      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+                                (AllocType)alloc_type);
+      }
+    }
+
     if (delete_size && flags()->new_delete_type_mismatch &&
         delete_size != m->UsedSize()) {
       ReportNewDeleteSizeMismatch(p, delete_size, stack);
     }
-    ASAN_FREE_HOOK(ptr);
-    // Must mark the chunk as quarantined before any changes to its metadata.
-    AtomicallySetQuarantineFlag(m, ptr, stack);
 
     QuarantineChunk(m, ptr, stack, alloc_type);
   }
@@ -551,7 +594,7 @@ struct Allocator {
 
   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-      return allocator.ReturnNullOrDie();
+      return allocator.ReturnNullOrDieOnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
     // as it comes directly from mmap.
@@ -642,6 +685,8 @@ struct Allocator {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
+
+  void ReleaseToOS() { allocator.ReleaseToOS(); }
 };
 
 static Allocator instance(LINKER_INITIALIZED);
@@ -653,11 +698,17 @@ static AsanAllocator &get_allocator() {
 bool AsanChunkView::IsValid() {
   return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
 }
+bool AsanChunkView::IsAllocated() {
+  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+}
 uptr AsanChunkView::Beg() { return chunk_->Beg(); }
 uptr AsanChunkView::End() { return Beg() + UsedSize(); }
 uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() {
+  return (AllocType)chunk_->alloc_type;
+}
 
 static StackTrace GetStackTraceFromId(u32 id) {
   CHECK(id);
@@ -666,16 +717,22 @@ static StackTrace GetStackTraceFromId(u32 id) {
   return res;
 }
 
+u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
+
 StackTrace AsanChunkView::GetAllocStack() {
-  return GetStackTraceFromId(chunk_->alloc_context_id);
+  return GetStackTraceFromId(GetAllocStackId());
 }
 
 StackTrace AsanChunkView::GetFreeStack() {
-  return GetStackTraceFromId(chunk_->free_context_id);
+  return GetStackTraceFromId(GetFreeStackId());
 }
 
+void ReleaseToOS() { instance.ReleaseToOS(); }
+
 void InitializeAllocator(const AllocatorOptions &options) {
   instance.Initialize(options);
+  SetAllocatorReleaseToOSCallback(ReleaseToOS);
 }
 
 void ReInitializeAllocator(const AllocatorOptions &options) {
@@ -689,6 +746,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
 AsanChunkView FindHeapChunkByAddress(uptr addr) {
   return instance.FindHeapChunkByAddress(addr);
 }
+AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
+  return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
+}
 
 void AsanThreadLocalMallocStorage::CommitBack() {
   instance.CommitBack(this);
@@ -752,7 +812,7 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
   return 0;
 }
 
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
+uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
   if (!ptr) return 0;
   uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
   if (flags()->check_malloc_usable_size && (usable_size == 0)) {
@@ -47,16 +47,20 @@ void GetAllocatorOptions(AllocatorOptions *options);
 class AsanChunkView {
  public:
   explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
-  bool IsValid();   // Checks if AsanChunkView points to a valid allocated
-                    // or quarantined chunk.
-  uptr Beg();       // First byte of user memory.
-  uptr End();       // Last byte of user memory.
-  uptr UsedSize();  // Size requested by the user.
+  bool IsValid();      // Checks if AsanChunkView points to a valid allocated
+                       // or quarantined chunk.
+  bool IsAllocated();  // Checks if the memory is currently allocated.
+  uptr Beg();          // First byte of user memory.
+  uptr End();          // Last byte of user memory.
+  uptr UsedSize();     // Size requested by the user.
   uptr AllocTid();
   uptr FreeTid();
   bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
+  u32 GetAllocStackId();
+  u32 GetFreeStackId();
   StackTrace GetAllocStack();
   StackTrace GetFreeStack();
+  AllocType GetAllocType();
   bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
     if (addr >= Beg() && (addr + access_size) <= End()) {
       *offset = addr - Beg();
@@ -85,6 +89,7 @@ class AsanChunkView {
 };
 
 AsanChunkView FindHeapChunkByAddress(uptr address);
+AsanChunkView FindHeapChunkByAllocBeg(uptr address);
 
 // List of AsanChunks with total size.
 class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
@@ -112,18 +117,36 @@ struct AsanMapUnmapCallback {
 # if defined(__powerpc64__)
 const uptr kAllocatorSpace = 0xa0000000000ULL;
 const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__aarch64__) && SANITIZER_ANDROID
+const uptr kAllocatorSpace = 0x3000000000ULL;
+const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
+typedef VeryCompactSizeClassMap SizeClassMap;
 # elif defined(__aarch64__)
-// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
 // so no need to use different values for different VMA.
 const uptr kAllocatorSpace = 0x10000000000ULL;
 const uptr kAllocatorSize = 0x10000000000ULL;  // 3T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif SANITIZER_WINDOWS
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x8000000000ULL;  // 500G
+typedef DefaultSizeClassMap SizeClassMap;
 # else
 const uptr kAllocatorSpace = 0x600000000000ULL;
 const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
-# endif
 typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
-    SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+# endif
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __asan::SizeClassMap SizeClassMap;
+  typedef AsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
 #else  // Fallback to SizeClassAllocator32.
 static const uptr kRegionSizeLog = 20;
 static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
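The AP64 struct above replaces a long positional template argument list with a single "parameter struct" type that names every knob, so adding a new allocator option no longer shifts the meaning of later arguments. A toy sketch of the pattern; ToyParams and ToyAllocator are illustrative only, not the sanitizer's real types:

#include <cstdio>

struct ToyParams {  // cf. AP64: every configuration knob has a name.
  static const unsigned long long kSpaceBeg = 0x600000000000ULL;
  static const unsigned long long kSpaceSize = 0x40000000000ULL;  // 4T
  static const unsigned long long kMetadataSize = 0;
};

template <class Params>
struct ToyAllocator {
  void Describe() {
    std::printf("space [%#llx, %#llx), metadata %llu bytes per chunk\n",
                Params::kSpaceBeg, Params::kSpaceBeg + Params::kSpaceSize,
                Params::kMetadataSize);
  }
};

int main() {
  ToyAllocator<ToyParams> a;  // cf. SizeClassAllocator64<AP64>
  a.Describe();
  return 0;
}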
@@ -169,7 +192,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
 
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         BufferedStackTrace *stack);
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
+uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
 
 uptr asan_mz_size(const void *ptr);
 void asan_mz_force_lock();
@@ -12,74 +12,39 @@
 //===----------------------------------------------------------------------===//
 
 #include "asan_allocator.h"
+#include "asan_descriptions.h"
 #include "asan_flags.h"
 #include "asan_internal.h"
 #include "asan_mapping.h"
 #include "asan_report.h"
 #include "asan_thread.h"
 
-namespace __asan {
+namespace {
+using namespace __asan;
 
-void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
-  descr->name[0] = 0;
-  descr->region_address = 0;
-  descr->region_size = 0;
-  descr->region_kind = "stack";
-
-  AsanThread::StackFrameAccess access;
-  if (!t->GetStackFrameAccessByAddr(addr, &access))
-    return;
+static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
+                                char *name, uptr name_size,
+                                uptr &region_address, uptr &region_size) {
   InternalMmapVector<StackVarDescr> vars(16);
-  if (!ParseFrameDescription(access.frame_descr, &vars)) {
+  if (!ParseFrameDescription(frame_descr, &vars)) {
     return;
   }
 
   for (uptr i = 0; i < vars.size(); i++) {
-    if (access.offset <= vars[i].beg + vars[i].size) {
-      internal_strncat(descr->name, vars[i].name_pos,
-                       Min(descr->name_size, vars[i].name_len));
-      descr->region_address = addr - (access.offset - vars[i].beg);
-      descr->region_size = vars[i].size;
+    if (offset <= vars[i].beg + vars[i].size) {
+      // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
+      // if we're limiting the copy due to name_len, we add 1 to ensure we copy
+      // the whole name and then terminate with '\0'.
+      internal_strlcpy(name, vars[i].name_pos,
+                       Min(name_size, vars[i].name_len + 1));
+      region_address = addr - (offset - vars[i].beg);
+      region_size = vars[i].size;
       return;
     }
   }
 }
 
-void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
-  AsanChunkView chunk = FindHeapChunkByAddress(addr);
-
-  descr->name[0] = 0;
-  descr->region_address = 0;
-  descr->region_size = 0;
-
-  if (!chunk.IsValid()) {
-    descr->region_kind = "heap-invalid";
-    return;
-  }
-
-  descr->region_address = chunk.Beg();
-  descr->region_size = chunk.UsedSize();
-  descr->region_kind = "heap";
-}
-
-void AsanLocateAddress(uptr addr, AddressDescription *descr) {
-  if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
-    return;
-  }
-  if (GetInfoForAddressIfGlobal(addr, descr)) {
-    return;
-  }
-  asanThreadRegistry().Lock();
-  AsanThread *thread = FindThreadByStackAddress(addr);
-  asanThreadRegistry().Unlock();
-  if (thread) {
-    GetInfoForStackVar(addr, descr, thread);
-    return;
-  }
-  GetInfoForHeapAddress(addr, descr);
-}
-
-static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
                   bool alloc_stack) {
   AsanChunkView chunk = FindHeapChunkByAddress(addr);
   if (!chunk.IsValid()) return 0;
@@ -106,18 +71,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
   return 0;
 }
 
-}  // namespace __asan
-
-using namespace __asan;
+}  // namespace
 
 SANITIZER_INTERFACE_ATTRIBUTE
 const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
-                                  uptr *region_address, uptr *region_size) {
-  AddressDescription descr = { name, name_size, 0, 0, nullptr };
-  AsanLocateAddress(addr, &descr);
-  if (region_address) *region_address = descr.region_address;
-  if (region_size) *region_size = descr.region_size;
-  return descr.region_kind;
+                                  uptr *region_address_ptr,
+                                  uptr *region_size_ptr) {
+  AddressDescription descr(addr);
+  uptr region_address = 0;
+  uptr region_size = 0;
+  const char *region_kind = nullptr;
+  if (name && name_size > 0) name[0] = 0;
+
+  if (auto shadow = descr.AsShadow()) {
+    // region_{address,size} are already 0
+    switch (shadow->kind) {
+      case kShadowKindLow:
+        region_kind = "low shadow";
+        break;
+      case kShadowKindGap:
+        region_kind = "shadow gap";
+        break;
+      case kShadowKindHigh:
+        region_kind = "high shadow";
+        break;
+    }
+  } else if (auto heap = descr.AsHeap()) {
+    region_kind = "heap";
+    region_address = heap->chunk_access.chunk_begin;
+    region_size = heap->chunk_access.chunk_size;
+  } else if (auto stack = descr.AsStack()) {
+    region_kind = "stack";
+    if (!stack->frame_descr) {
+      // region_{address,size} are already 0
+    } else {
+      FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
+                          name_size, region_address, region_size);
+    }
+  } else if (auto global = descr.AsGlobal()) {
+    region_kind = "global";
+    auto &g = global->globals[0];
+    internal_strlcpy(name, g.name, name_size);
+    region_address = g.beg;
+    region_size = g.size;
+  } else {
+    // region_{address,size} are already 0
+    region_kind = "heap-invalid";
+  }
+
+  CHECK(region_kind);
+  if (region_address_ptr) *region_address_ptr = region_address;
+  if (region_size_ptr) *region_size_ptr = region_size;
+  return region_kind;
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
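__asan_locate_address is part of ASan's public interface, so the rework above is user-visible. A usage sketch, assuming the prototype published in <sanitizer/asan_interface.h> (which spells the types as void * and size_t rather than the internal uptr); build with -fsanitize=address:

#include <sanitizer/asan_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  char *p = static_cast<char *>(malloc(32));
  char name[64];
  void *region_beg = nullptr;
  size_t region_size = 0;
  // Ask ASan what kind of memory p + 8 points into.
  const char *kind = __asan_locate_address(p + 8, name, sizeof(name),
                                           &region_beg, &region_size);
  // Expected: kind == "heap", with the region covering the 32-byte chunk.
  std::printf("%p: %s region at %p, size %zu\n", (void *)(p + 8), kind,
              region_beg, region_size);
  free(p);
  return 0;
}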
libsanitizer/asan/asan_descriptions.cc (new file, 484 lines)
@@ -0,0 +1,484 @@
//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan functions for getting information about an address and/or printing it.
//===----------------------------------------------------------------------===//

#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

// Return " (thread_name) " or an empty string if the name is empty.
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
                                      uptr buff_len) {
  const char *name = t->name;
  if (name[0] == '\0') return "";
  buff[0] = 0;
  internal_strncat(buff, " (", 3);
  internal_strncat(buff, name, buff_len - 4);
  internal_strncat(buff, ")", 2);
  return buff;
}

const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
  if (tid == kInvalidTid) return "";
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *t = GetThreadContextByTidLocked(tid);
  return ThreadNameWithParenthesis(t, buff, buff_len);
}

void DescribeThread(AsanThreadContext *context) {
  CHECK(context);
  asanThreadRegistry().CheckLocked();
  // No need to announce the main thread.
  if (context->tid == 0 || context->announced) {
    return;
  }
  context->announced = true;
  char tname[128];
  InternalScopedString str(1024);
  str.append("Thread T%d%s", context->tid,
             ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
  if (context->parent_tid == kInvalidTid) {
    str.append(" created by unknown thread\n");
    Printf("%s", str.data());
    return;
  }
  str.append(
      " created by T%d%s here:\n", context->parent_tid,
      ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
  Printf("%s", str.data());
  StackDepotGet(context->stack_id).Print();
  // Recursively describe the parent thread if needed.
  if (flags()->print_full_thread_history) {
    AsanThreadContext *parent_context =
        GetThreadContextByTidLocked(context->parent_tid);
    DescribeThread(parent_context);
  }
}

// Shadow descriptions
static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
  CHECK(!AddrIsInMem(addr));
  if (AddrIsInShadowGap(addr)) {
    *shadow_kind = kShadowKindGap;
  } else if (AddrIsInHighShadow(addr)) {
    *shadow_kind = kShadowKindHigh;
  } else if (AddrIsInLowShadow(addr)) {
    *shadow_kind = kShadowKindLow;
  } else {
    CHECK(0 && "Address is not in memory and not in shadow?");
    return false;
  }
  return true;
}

bool DescribeAddressIfShadow(uptr addr) {
  ShadowAddressDescription descr;
  if (!GetShadowAddressInformation(addr, &descr)) return false;
  descr.Print();
  return true;
}

bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
  if (AddrIsInMem(addr)) return false;
  ShadowKind shadow_kind;
  if (!GetShadowKind(addr, &shadow_kind)) return false;
  if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
  descr->addr = addr;
  descr->kind = shadow_kind;
  return true;
}

// Heap descriptions
static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
                                            AsanChunkView chunk, uptr addr,
                                            uptr access_size) {
  descr->bad_addr = addr;
  if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
    descr->access_type = kAccessTypeLeft;
  } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
    descr->access_type = kAccessTypeRight;
    if (descr->offset < 0) {
      descr->bad_addr -= descr->offset;
      descr->offset = 0;
    }
  } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
    descr->access_type = kAccessTypeInside;
  } else {
    descr->access_type = kAccessTypeUnknown;
  }
  descr->chunk_begin = chunk.Beg();
  descr->chunk_size = chunk.UsedSize();
  descr->alloc_type = chunk.GetAllocType();
}

static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
  Decorator d;
  InternalScopedString str(4096);
  str.append("%s", d.Location());
  switch (descr.access_type) {
    case kAccessTypeLeft:
      str.append("%p is located %zd bytes to the left of",
                 (void *)descr.bad_addr, descr.offset);
      break;
    case kAccessTypeRight:
      str.append("%p is located %zd bytes to the right of",
                 (void *)descr.bad_addr, descr.offset);
      break;
    case kAccessTypeInside:
      str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
                 descr.offset);
      break;
    case kAccessTypeUnknown:
      str.append(
          "%p is located somewhere around (this is AddressSanitizer bug!)",
          (void *)descr.bad_addr);
  }
  str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
             (void *)descr.chunk_begin,
             (void *)(descr.chunk_begin + descr.chunk_size));
  str.append("%s", d.EndLocation());
  Printf("%s", str.data());
}

bool GetHeapAddressInformation(uptr addr, uptr access_size,
                               HeapAddressDescription *descr) {
  AsanChunkView chunk = FindHeapChunkByAddress(addr);
  if (!chunk.IsValid()) {
    return false;
  }
  descr->addr = addr;
  GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
                                  access_size);
  CHECK_NE(chunk.AllocTid(), kInvalidTid);
  descr->alloc_tid = chunk.AllocTid();
  descr->alloc_stack_id = chunk.GetAllocStackId();
  descr->free_tid = chunk.FreeTid();
  if (descr->free_tid != kInvalidTid)
    descr->free_stack_id = chunk.GetFreeStackId();
  return true;
}

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
  HeapAddressDescription descr;
  if (!GetHeapAddressInformation(addr, access_size, &descr)) {
    Printf(
        "AddressSanitizer can not describe address in more detail "
        "(wild memory access suspected).\n");
    return false;
  }
  descr.Print();
  return true;
}

// Stack descriptions
bool GetStackAddressInformation(uptr addr, uptr access_size,
                                StackAddressDescription *descr) {
  AsanThread *t = FindThreadByStackAddress(addr);
  if (!t) return false;

  descr->addr = addr;
  descr->tid = t->tid();
  // Try to fetch precise stack frame for this access.
  AsanThread::StackFrameAccess access;
  if (!t->GetStackFrameAccessByAddr(addr, &access)) {
    descr->frame_descr = nullptr;
    return true;
  }

  descr->offset = access.offset;
  descr->access_size = access_size;
  descr->frame_pc = access.frame_pc;
  descr->frame_descr = access.frame_descr;

#if SANITIZER_PPC64V1
  // On PowerPC64 ELFv1, the address of a function actually points to a
  // three-doubleword data structure with the first field containing
  // the address of the function's code.
  descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
#endif
  descr->frame_pc += 16;

  return true;
}

static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
                                          uptr access_size, uptr prev_var_end,
                                          uptr next_var_beg) {
  uptr var_end = var.beg + var.size;
  uptr addr_end = addr + access_size;
  const char *pos_descr = nullptr;
  // If the variable [var.beg, var_end) is the nearest variable to the
  // current memory access, indicate it in the log.
  if (addr >= var.beg) {
    if (addr_end <= var_end)
      pos_descr = "is inside";  // May happen if this is a use-after-return.
    else if (addr < var_end)
      pos_descr = "partially overflows";
    else if (addr_end <= next_var_beg &&
             next_var_beg - addr_end >= addr - var_end)
      pos_descr = "overflows";
  } else {
    if (addr_end > var.beg)
      pos_descr = "partially underflows";
    else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
      pos_descr = "underflows";
  }
  InternalScopedString str(1024);
  str.append(" [%zd, %zd)", var.beg, var_end);
  // Render variable name.
  str.append(" '");
  for (uptr i = 0; i < var.name_len; ++i) {
    str.append("%c", var.name_pos[i]);
  }
  str.append("'");
  if (pos_descr) {
    Decorator d;
    // FIXME: we may want to also print the size of the access here,
    // but in case of accesses generated by memset it may be confusing.
    str.append("%s <== Memory access at offset %zd %s this variable%s\n",
               d.Location(), addr, pos_descr, d.EndLocation());
  } else {
    str.append("\n");
  }
  Printf("%s", str.data());
}

bool DescribeAddressIfStack(uptr addr, uptr access_size) {
  StackAddressDescription descr;
  if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
  descr.Print();
  return true;
}

// Global descriptions
static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
                                            const __asan_global &g) {
  InternalScopedString str(4096);
  Decorator d;
  str.append("%s", d.Location());
  if (addr < g.beg) {
    str.append("%p is located %zd bytes to the left", (void *)addr,
               g.beg - addr);
  } else if (addr + access_size > g.beg + g.size) {
    if (addr < g.beg + g.size) addr = g.beg + g.size;
    str.append("%p is located %zd bytes to the right", (void *)addr,
               addr - (g.beg + g.size));
  } else {
    // Can it happen?
    str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
  }
  str.append(" of global variable '%s' defined in '",
             MaybeDemangleGlobalName(g.name));
  PrintGlobalLocation(&str, g);
  str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
  str.append("%s", d.EndLocation());
  PrintGlobalNameIfASCII(&str, g);
  Printf("%s", str.data());
}

bool GetGlobalAddressInformation(uptr addr, uptr access_size,
                                 GlobalAddressDescription *descr) {
  descr->addr = addr;
  int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
                                         ARRAY_SIZE(descr->globals));
  descr->size = globals_num;
  descr->access_size = access_size;
  return globals_num != 0;
}

bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
                             const char *bug_type) {
  GlobalAddressDescription descr;
  if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;

  descr.Print(bug_type);
  return true;
}

void ShadowAddressDescription::Print() const {
  Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
}

void GlobalAddressDescription::Print(const char *bug_type) const {
  for (int i = 0; i < size; i++) {
    DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
    if (bug_type &&
        0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
        reg_sites[i]) {
      Printf(" registered at:\n");
      StackDepotGet(reg_sites[i]).Print();
    }
  }
}

void StackAddressDescription::Print() const {
  Decorator d;
  char tname[128];
  Printf("%s", d.Location());
  Printf("Address %p is located in stack of thread T%d%s", addr, tid,
         ThreadNameWithParenthesis(tid, tname, sizeof(tname)));

  if (!frame_descr) {
    Printf("%s\n", d.EndLocation());
    return;
  }
  Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());

  // Now we print the frame where the alloca has happened.
  // We print this frame as a stack trace with one element.
  // The symbolizer may print more than one frame if inlining was involved.
  // The frame numbers may be different than those in the stack trace printed
  // previously. That's unfortunate, but I have no better solution,
  // especially given that the alloca may be from entirely different place
  // (e.g. use-after-scope, or different thread's stack).
  Printf("%s", d.EndLocation());
  StackTrace alloca_stack(&frame_pc, 1);
  alloca_stack.Print();

  InternalMmapVector<StackVarDescr> vars(16);
  if (!ParseFrameDescription(frame_descr, &vars)) {
    Printf(
        "AddressSanitizer can't parse the stack frame "
        "descriptor: |%s|\n",
        frame_descr);
    // 'addr' is a stack address, so return true even if we can't parse frame
    return;
  }
  uptr n_objects = vars.size();
  // Report the number of stack objects.
  Printf(" This frame has %zu object(s):\n", n_objects);

  // Report all objects in this frame.
  for (uptr i = 0; i < n_objects; i++) {
    uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
    uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
    PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
                                  next_var_beg);
  }
  Printf(
      "HINT: this may be a false positive if your program uses "
      "some custom stack unwind mechanism or swapcontext\n");
  if (SANITIZER_WINDOWS)
    Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
  else
    Printf(" (longjmp and C++ exceptions *are* supported)\n");

  DescribeThread(GetThreadContextByTidLocked(tid));
}

void HeapAddressDescription::Print() const {
  PrintHeapChunkAccess(addr, chunk_access);

  asanThreadRegistry().CheckLocked();
  AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
  StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);

  char tname[128];
  Decorator d;
  AsanThreadContext *free_thread = nullptr;
  if (free_tid != kInvalidTid) {
    free_thread = GetThreadContextByTidLocked(free_tid);
    Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
           free_thread->tid,
           ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
           d.EndAllocation());
    StackTrace free_stack = GetStackTraceFromId(free_stack_id);
    free_stack.Print();
    Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
           alloc_thread->tid,
           ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
           d.EndAllocation());
  } else {
    Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
           alloc_thread->tid,
           ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
           d.EndAllocation());
  }
  alloc_stack.Print();
  DescribeThread(GetCurrentThread());
  if (free_thread) DescribeThread(free_thread);
  DescribeThread(alloc_thread);
}

AddressDescription::AddressDescription(uptr addr, uptr access_size,
                                       bool shouldLockThreadRegistry) {
  if (GetShadowAddressInformation(addr, &data.shadow)) {
    data.kind = kAddressKindShadow;
    return;
  }
  if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
    data.kind = kAddressKindHeap;
    return;
  }

  bool isStackMemory = false;
  if (shouldLockThreadRegistry) {
    ThreadRegistryLock l(&asanThreadRegistry());
    isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
  } else {
    isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
  }
  if (isStackMemory) {
    data.kind = kAddressKindStack;
    return;
  }

  if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
    data.kind = kAddressKindGlobal;
    return;
  }
  data.kind = kAddressKindWild;
  addr = 0;
}

void PrintAddressDescription(uptr addr, uptr access_size,
                             const char *bug_type) {
  ShadowAddressDescription shadow_descr;
  if (GetShadowAddressInformation(addr, &shadow_descr)) {
    shadow_descr.Print();
    return;
  }

  GlobalAddressDescription global_descr;
  if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
    global_descr.Print(bug_type);
    return;
  }

  StackAddressDescription stack_descr;
  if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
    stack_descr.Print();
    return;
  }

  HeapAddressDescription heap_descr;
  if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
    heap_descr.Print();
    return;
  }

  // We exhausted our possibilities. Bail out.
  Printf(
      "AddressSanitizer can not describe address in more detail "
      "(wild memory access suspected).\n");
}
}  // namespace __asan
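The interval logic in PrintAccessAndVarIntersection is worth restating in isolation. The sketch below reimplements the core classification of an access [addr, addr + size) against a variable [beg, end); it deliberately omits ASan's extra distance checks against neighboring variables, so it is a simplification, not the exact routine:

#include <cstdio>
#include <initializer_list>

const char *Classify(unsigned long addr, unsigned long size,
                     unsigned long beg, unsigned long end) {
  unsigned long addr_end = addr + size;
  if (addr >= beg) {
    if (addr_end <= end) return "is inside";       // e.g. use-after-return
    if (addr < end) return "partially overflows";  // straddles the right edge
    return "overflows";                            // entirely past the end
  }
  if (addr_end > beg) return "partially underflows";  // straddles the left edge
  return "underflows";                                // entirely before begin
}

int main() {
  // Variable occupies [32, 64); probe a few 8-byte accesses around it.
  for (unsigned long a : {16ul, 28ul, 40ul, 60ul, 72ul})
    std::printf("access [%lu,%lu): %s\n", a, a + 8, Classify(a, 8, 32, 64));
  return 0;
}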
libsanitizer/asan/asan_descriptions.h (new file, 251 lines)
@@ -0,0 +1,251 @@
//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_descriptions.cc.
// TODO(filcab): Most struct definitions should move to the interface headers.
//===----------------------------------------------------------------------===//
#ifndef ASAN_DESCRIPTIONS_H
#define ASAN_DESCRIPTIONS_H

#include "asan_allocator.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

namespace __asan {

void DescribeThread(AsanThreadContext *context);
static inline void DescribeThread(AsanThread *t) {
  if (t) DescribeThread(t->context());
}
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
                                      uptr buff_len);
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Access() { return Blue(); }
  const char *EndAccess() { return Default(); }
  const char *Location() { return Green(); }
  const char *EndLocation() { return Default(); }
  const char *Allocation() { return Magenta(); }
  const char *EndAllocation() { return Default(); }

  const char *ShadowByte(u8 byte) {
    switch (byte) {
      case kAsanHeapLeftRedzoneMagic:
      case kAsanArrayCookieMagic:
        return Red();
      case kAsanHeapFreeMagic:
        return Magenta();
      case kAsanStackLeftRedzoneMagic:
      case kAsanStackMidRedzoneMagic:
      case kAsanStackRightRedzoneMagic:
        return Red();
      case kAsanStackAfterReturnMagic:
        return Magenta();
      case kAsanInitializationOrderMagic:
        return Cyan();
      case kAsanUserPoisonedMemoryMagic:
      case kAsanContiguousContainerOOBMagic:
      case kAsanAllocaLeftMagic:
      case kAsanAllocaRightMagic:
        return Blue();
      case kAsanStackUseAfterScopeMagic:
        return Magenta();
      case kAsanGlobalRedzoneMagic:
        return Red();
      case kAsanInternalHeapMagic:
        return Yellow();
      case kAsanIntraObjectRedzone:
        return Yellow();
      default:
        return Default();
    }
  }
  const char *EndShadowByte() { return Default(); }
  const char *MemoryByte() { return Magenta(); }
  const char *EndMemoryByte() { return Default(); }
};

enum ShadowKind : u8 {
  kShadowKindLow,
  kShadowKindGap,
  kShadowKindHigh,
};
static const char *const ShadowNames[] = {"low shadow", "shadow gap",
                                          "high shadow"};

struct ShadowAddressDescription {
  uptr addr;
  ShadowKind kind;
  u8 shadow_byte;

  void Print() const;
};

bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr);

enum AccessType {
  kAccessTypeLeft,
  kAccessTypeRight,
  kAccessTypeInside,
  kAccessTypeUnknown,  // This means we have an AddressSanitizer bug!
};

struct ChunkAccess {
  uptr bad_addr;
  sptr offset;
  uptr chunk_begin;
  uptr chunk_size;
  u32 access_type : 2;
  u32 alloc_type : 2;
};

struct HeapAddressDescription {
  uptr addr;
  uptr alloc_tid;
  uptr free_tid;
  u32 alloc_stack_id;
  u32 free_stack_id;
  ChunkAccess chunk_access;

  void Print() const;
};

bool GetHeapAddressInformation(uptr addr, uptr access_size,
                               HeapAddressDescription *descr);
bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);

struct StackAddressDescription {
  uptr addr;
  uptr tid;
  uptr offset;
  uptr frame_pc;
  uptr access_size;
  const char *frame_descr;

  void Print() const;
};

bool GetStackAddressInformation(uptr addr, uptr access_size,
                                StackAddressDescription *descr);

struct GlobalAddressDescription {
  uptr addr;
  // Assume address is close to at most four globals.
  static const int kMaxGlobals = 4;
  __asan_global globals[kMaxGlobals];
  u32 reg_sites[kMaxGlobals];
  uptr access_size;
  u8 size;

  void Print(const char *bug_type = "") const;
};

bool GetGlobalAddressInformation(uptr addr, uptr access_size,
                                 GlobalAddressDescription *descr);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);

// General function to describe an address. Will try to describe the address as
// a shadow, global (variable), stack, or heap address.
// bug_type is optional and is used for checking if we're reporting an
// initialization-order-fiasco
// The proper access_size should be passed for stack, global, and heap
// addresses. Defaults to 1.
// Each of the *AddressDescription functions has its own Print() member, which
// may take access_size and bug_type parameters if needed.
void PrintAddressDescription(uptr addr, uptr access_size = 1,
                             const char *bug_type = "");

enum AddressKind {
  kAddressKindWild,
  kAddressKindShadow,
  kAddressKindHeap,
  kAddressKindStack,
  kAddressKindGlobal,
};

class AddressDescription {
  struct AddressDescriptionData {
    AddressKind kind;
    union {
      ShadowAddressDescription shadow;
      HeapAddressDescription heap;
      StackAddressDescription stack;
      GlobalAddressDescription global;
      uptr addr;
    };
  };

  AddressDescriptionData data;

 public:
  AddressDescription() = default;
  // shouldLockThreadRegistry allows us to skip locking if we're sure we already
  // have done it.
  AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
      : AddressDescription(addr, 1, shouldLockThreadRegistry) {}
  AddressDescription(uptr addr, uptr access_size,
                     bool shouldLockThreadRegistry = true);

  uptr Address() const {
    switch (data.kind) {
      case kAddressKindWild:
        return data.addr;
      case kAddressKindShadow:
        return data.shadow.addr;
      case kAddressKindHeap:
        return data.heap.addr;
      case kAddressKindStack:
        return data.stack.addr;
      case kAddressKindGlobal:
        return data.global.addr;
    }
    UNREACHABLE("AddressInformation kind is invalid");
  }
  void Print(const char *bug_descr = nullptr) const {
    switch (data.kind) {
      case kAddressKindWild:
        Printf("Address %p is a wild pointer.\n", data.addr);
        return;
      case kAddressKindShadow:
        return data.shadow.Print();
      case kAddressKindHeap:
        return data.heap.Print();
      case kAddressKindStack:
        return data.stack.Print();
      case kAddressKindGlobal:
        // initialization-order-fiasco has a special Print()
        return data.global.Print(bug_descr);
    }
    UNREACHABLE("AddressInformation kind is invalid");
  }

  void StoreTo(AddressDescriptionData *dst) const { *dst = data; }

  const ShadowAddressDescription *AsShadow() const {
    return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
  }
  const HeapAddressDescription *AsHeap() const {
    return data.kind == kAddressKindHeap ? &data.heap : nullptr;
  }
  const StackAddressDescription *AsStack() const {
    return data.kind == kAddressKindStack ? &data.stack : nullptr;
  }
  const GlobalAddressDescription *AsGlobal() const {
    return data.kind == kAddressKindGlobal ? &data.global : nullptr;
  }
};

}  // namespace __asan

#endif  // ASAN_DESCRIPTIONS_H
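AddressDescription is a hand-rolled tagged union: one kind discriminator, a union of the per-kind payloads, and As*() accessors that return nullptr on a kind mismatch, which is what lets __asan_locate_address dispatch with a plain if/else-if chain. A self-contained sketch of the same layout with illustrative names, not ASan's types:

#include <cstdio>

enum Kind { kWild, kHeap, kStack };
struct HeapInfo  { unsigned long chunk_begin, chunk_size; };
struct StackInfo { unsigned long frame_offset; };

class Description {
  struct Data {
    Kind kind;
    union {  // one payload lives at a time, selected by `kind`
      HeapInfo heap;
      StackInfo stack;
      unsigned long addr;
    };
  } data;

 public:
  explicit Description(HeapInfo h) {
    data.kind = kHeap;
    data.heap = h;
  }
  // Typed accessors: nullptr on kind mismatch, like AsHeap()/AsStack() above.
  const HeapInfo *AsHeap() const {
    return data.kind == kHeap ? &data.heap : nullptr;
  }
  const StackInfo *AsStack() const {
    return data.kind == kStack ? &data.stack : nullptr;
  }
};

int main() {
  Description d(HeapInfo{0x1000, 32});
  if (const HeapInfo *h = d.AsHeap())
    std::printf("heap chunk [%#lx, %#lx)\n", h->chunk_begin,
                h->chunk_begin + h->chunk_size);
  if (!d.AsStack()) std::printf("not a stack address\n");
  return 0;
}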
libsanitizer/asan/asan_errors.cc (new file, 494 lines)
@@ -0,0 +1,494 @@
//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan implementation for error structures.
//===----------------------------------------------------------------------===//

#include "asan_errors.h"
#include <signal.h>
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

void ErrorStackOverflow::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  Report(
      "ERROR: AddressSanitizer: stack-overflow on address %p"
      " (pc %p bp %p sp %p T%d)\n",
      (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
  Printf("%s", d.EndWarning());
  scariness.Print();
  BufferedStackTrace stack;
  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
                                  common_flags()->fast_unwind_on_fatal);
  stack.Print();
  ReportErrorSummary("stack-overflow", &stack);
}

static void MaybeDumpInstructionBytes(uptr pc) {
  if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
  InternalScopedString str(1024);
  str.append("First 16 instruction bytes at pc: ");
  if (IsAccessibleMemoryRange(pc, 16)) {
    for (int i = 0; i < 16; ++i) {
      PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
    }
    str.append("\n");
  } else {
    str.append("unaccessible\n");
  }
  Report("%s", str.data());
}

void ErrorDeadlySignal::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  const char *description = DescribeSignalOrException(signo);
  Report(
      "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
      "T%d)\n",
      description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
  Printf("%s", d.EndWarning());
  if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
  if (is_memory_access) {
    const char *access_type =
        write_flag == SignalContext::WRITE
            ? "WRITE"
            : (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
    Report("The signal is caused by a %s memory access.\n", access_type);
    if (addr < GetPageSizeCached())
      Report("Hint: address points to the zero page.\n");
  }
  scariness.Print();
  BufferedStackTrace stack;
  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
                                  common_flags()->fast_unwind_on_fatal);
  stack.Print();
  MaybeDumpInstructionBytes(pc);
  Printf("AddressSanitizer can not provide additional info.\n");
  ReportErrorSummary(description, &stack);
}

void ErrorDoubleFree::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  char tname[128];
  Report(
      "ERROR: AddressSanitizer: attempting double-free on %p in "
      "thread T%d%s:\n",
      addr_description.addr, tid,
      ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
  Printf("%s", d.EndWarning());
  scariness.Print();
  GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
                        second_free_stack->top_frame_bp);
  stack.Print();
  addr_description.Print();
  ReportErrorSummary("double-free", &stack);
}

void ErrorNewDeleteSizeMismatch::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  char tname[128];
  Report(
      "ERROR: AddressSanitizer: new-delete-type-mismatch on %p in thread "
      "T%d%s:\n",
      addr_description.addr, tid,
      ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
  Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
  Printf(
      " size of the allocated type: %zd bytes;\n"
      " size of the deallocated type: %zd bytes.\n",
      addr_description.chunk_access.chunk_size, delete_size);
  CHECK_GT(free_stack->size, 0);
  scariness.Print();
  GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
  stack.Print();
  addr_description.Print();
  ReportErrorSummary("new-delete-type-mismatch", &stack);
  Report(
      "HINT: if you don't care about these errors you may set "
      "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
}

void ErrorFreeNotMalloced::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  char tname[128];
  Report(
      "ERROR: AddressSanitizer: attempting free on address "
      "which was not malloc()-ed: %p in thread T%d%s\n",
      addr_description.Address(), tid,
      ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
  Printf("%s", d.EndWarning());
  CHECK_GT(free_stack->size, 0);
  scariness.Print();
  GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
  stack.Print();
  addr_description.Print();
  ReportErrorSummary("bad-free", &stack);
}

void ErrorAllocTypeMismatch::Print() {
  static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
                                      "operator new []"};
  static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
                                        "operator delete []"};
  CHECK_NE(alloc_type, dealloc_type);
  Decorator d;
  Printf("%s", d.Warning());
  Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
         alloc_names[alloc_type], dealloc_names[dealloc_type],
         addr_description.addr);
  Printf("%s", d.EndWarning());
  CHECK_GT(dealloc_stack->size, 0);
  scariness.Print();
  GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
  stack.Print();
  addr_description.Print();
  ReportErrorSummary("alloc-dealloc-mismatch", &stack);
  Report(
      "HINT: if you don't care about these errors you may set "
      "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}

void ErrorMallocUsableSizeNotOwned::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  Report(
      "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
      "pointer which is not owned: %p\n",
      addr_description.Address());
  Printf("%s", d.EndWarning());
  stack->Print();
  addr_description.Print();
  ReportErrorSummary("bad-malloc_usable_size", stack);
}

void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  Report(
      "ERROR: AddressSanitizer: attempting to call "
      "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
      addr_description.Address());
  Printf("%s", d.EndWarning());
  stack->Print();
  addr_description.Print();
  ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
}

void ErrorStringFunctionMemoryRangesOverlap::Print() {
  Decorator d;
  char bug_type[100];
  internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
  Printf("%s", d.Warning());
  Report(
"ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
|
||||
"overlap\n",
|
||||
bug_type, addr1_description.Address(),
|
||||
addr1_description.Address() + length1, addr2_description.Address(),
|
||||
addr2_description.Address() + length2);
|
||||
Printf("%s", d.EndWarning());
|
||||
scariness.Print();
|
||||
stack->Print();
|
||||
addr1_description.Print();
|
||||
addr2_description.Print();
|
||||
ReportErrorSummary(bug_type, stack);
|
||||
}
|
||||
|
||||
void ErrorStringFunctionSizeOverflow::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
const char *bug_type = "negative-size-param";
|
||||
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
|
||||
Printf("%s", d.EndWarning());
|
||||
scariness.Print();
|
||||
stack->Print();
|
||||
addr_description.Print();
|
||||
ReportErrorSummary(bug_type, stack);
|
||||
}
|
||||
|
||||
void ErrorBadParamsToAnnotateContiguousContainer::Print() {
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: bad parameters to "
|
||||
"__sanitizer_annotate_contiguous_container:\n"
|
||||
" beg : %p\n"
|
||||
" end : %p\n"
|
||||
" old_mid : %p\n"
|
||||
" new_mid : %p\n",
|
||||
beg, end, old_mid, new_mid);
|
||||
uptr granularity = SHADOW_GRANULARITY;
|
||||
if (!IsAligned(beg, granularity))
|
||||
Report("ERROR: beg is not aligned by %d\n", granularity);
|
||||
stack->Print();
|
||||
ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
|
||||
}
|
||||
|
||||
void ErrorODRViolation::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Report("ERROR: AddressSanitizer: odr-violation (%p):\n", global1.beg);
|
||||
Printf("%s", d.EndWarning());
|
||||
InternalScopedString g1_loc(256), g2_loc(256);
|
||||
PrintGlobalLocation(&g1_loc, global1);
|
||||
PrintGlobalLocation(&g2_loc, global2);
|
||||
Printf(" [1] size=%zd '%s' %s\n", global1.size,
|
||||
MaybeDemangleGlobalName(global1.name), g1_loc.data());
|
||||
Printf(" [2] size=%zd '%s' %s\n", global2.size,
|
||||
MaybeDemangleGlobalName(global2.name), g2_loc.data());
|
||||
if (stack_id1 && stack_id2) {
|
||||
Printf("These globals were registered at these points:\n");
|
||||
Printf(" [1]:\n");
|
||||
StackDepotGet(stack_id1).Print();
|
||||
Printf(" [2]:\n");
|
||||
StackDepotGet(stack_id2).Print();
|
||||
}
|
||||
Report(
|
||||
"HINT: if you don't care about these errors you may set "
|
||||
"ASAN_OPTIONS=detect_odr_violation=0\n");
|
||||
InternalScopedString error_msg(256);
|
||||
error_msg.append("odr-violation: global '%s' at %s",
|
||||
MaybeDemangleGlobalName(global1.name), g1_loc.data());
|
||||
ReportErrorSummary(error_msg.data());
|
||||
}
|
||||
|
||||
void ErrorInvalidPointerPair::Print() {
|
||||
const char *bug_type = "invalid-pointer-pair";
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n",
|
||||
addr1_description.Address(), addr2_description.Address());
|
||||
Printf("%s", d.EndWarning());
|
||||
GET_STACK_TRACE_FATAL(pc, bp);
|
||||
stack.Print();
|
||||
addr1_description.Print();
|
||||
addr2_description.Print();
|
||||
ReportErrorSummary(bug_type, &stack);
|
||||
}
|
||||
|
||||
static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
|
||||
return s[-1] > 127 && s[1] > 127;
|
||||
}
|
||||
|
||||
ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
|
||||
bool is_write_, uptr access_size_)
|
||||
: ErrorBase(tid),
|
||||
addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
|
||||
pc(pc_),
|
||||
bp(bp_),
|
||||
sp(sp_),
|
||||
access_size(access_size_),
|
||||
is_write(is_write_),
|
||||
shadow_val(0) {
|
||||
scariness.Clear();
|
||||
if (access_size) {
|
||||
if (access_size <= 9) {
|
||||
char desr[] = "?-byte";
|
||||
desr[0] = '0' + access_size;
|
||||
scariness.Scare(access_size + access_size / 2, desr);
|
||||
} else if (access_size >= 10) {
|
||||
scariness.Scare(15, "multi-byte");
|
||||
}
|
||||
is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
|
||||
|
||||
// Determine the error type.
|
||||
bug_descr = "unknown-crash";
|
||||
if (AddrIsInMem(addr)) {
|
||||
u8 *shadow_addr = (u8 *)MemToShadow(addr);
|
||||
// If we are accessing 16 bytes, look at the second shadow byte.
|
||||
if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
|
||||
// If we are in the partial right redzone, look at the next shadow byte.
|
||||
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
|
||||
bool far_from_bounds = false;
|
||||
shadow_val = *shadow_addr;
|
||||
int bug_type_score = 0;
|
||||
// For use-after-frees reads are almost as bad as writes.
|
||||
int read_after_free_bonus = 0;
|
||||
switch (shadow_val) {
|
||||
case kAsanHeapLeftRedzoneMagic:
|
||||
case kAsanArrayCookieMagic:
|
||||
bug_descr = "heap-buffer-overflow";
|
||||
bug_type_score = 10;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanHeapFreeMagic:
|
||||
bug_descr = "heap-use-after-free";
|
||||
bug_type_score = 20;
|
||||
if (!is_write) read_after_free_bonus = 18;
|
||||
break;
|
||||
case kAsanStackLeftRedzoneMagic:
|
||||
bug_descr = "stack-buffer-underflow";
|
||||
bug_type_score = 25;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanInitializationOrderMagic:
|
||||
bug_descr = "initialization-order-fiasco";
|
||||
bug_type_score = 1;
|
||||
break;
|
||||
case kAsanStackMidRedzoneMagic:
|
||||
case kAsanStackRightRedzoneMagic:
|
||||
bug_descr = "stack-buffer-overflow";
|
||||
bug_type_score = 25;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanStackAfterReturnMagic:
|
||||
bug_descr = "stack-use-after-return";
|
||||
bug_type_score = 30;
|
||||
if (!is_write) read_after_free_bonus = 18;
|
||||
break;
|
||||
case kAsanUserPoisonedMemoryMagic:
|
||||
bug_descr = "use-after-poison";
|
||||
bug_type_score = 20;
|
||||
break;
|
||||
case kAsanContiguousContainerOOBMagic:
|
||||
bug_descr = "container-overflow";
|
||||
bug_type_score = 10;
|
||||
break;
|
||||
case kAsanStackUseAfterScopeMagic:
|
||||
bug_descr = "stack-use-after-scope";
|
||||
bug_type_score = 10;
|
||||
break;
|
||||
case kAsanGlobalRedzoneMagic:
|
||||
bug_descr = "global-buffer-overflow";
|
||||
bug_type_score = 10;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanIntraObjectRedzone:
|
||||
bug_descr = "intra-object-overflow";
|
||||
bug_type_score = 10;
|
||||
break;
|
||||
case kAsanAllocaLeftMagic:
|
||||
case kAsanAllocaRightMagic:
|
||||
bug_descr = "dynamic-stack-buffer-overflow";
|
||||
bug_type_score = 25;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
}
|
||||
scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
|
||||
if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void PrintContainerOverflowHint() {
|
||||
Printf("HINT: if you don't care about these errors you may set "
|
||||
"ASAN_OPTIONS=detect_container_overflow=0.\n"
|
||||
"If you suspect a false positive see also: "
|
||||
"https://github.com/google/sanitizers/wiki/"
|
||||
"AddressSanitizerContainerOverflow.\n");
|
||||
}
|
||||
|
||||
static void PrintShadowByte(InternalScopedString *str, const char *before,
|
||||
u8 byte, const char *after = "\n") {
|
||||
PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
|
||||
}
|
||||
|
||||
static void PrintLegend(InternalScopedString *str) {
|
||||
str->append(
|
||||
"Shadow byte legend (one shadow byte represents %d "
|
||||
"application bytes):\n",
|
||||
(int)SHADOW_GRANULARITY);
|
||||
PrintShadowByte(str, " Addressable: ", 0);
|
||||
str->append(" Partially addressable: ");
|
||||
for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
|
||||
str->append("\n");
|
||||
PrintShadowByte(str, " Heap left redzone: ",
|
||||
kAsanHeapLeftRedzoneMagic);
|
||||
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
|
||||
PrintShadowByte(str, " Stack left redzone: ",
|
||||
kAsanStackLeftRedzoneMagic);
|
||||
PrintShadowByte(str, " Stack mid redzone: ",
|
||||
kAsanStackMidRedzoneMagic);
|
||||
PrintShadowByte(str, " Stack right redzone: ",
|
||||
kAsanStackRightRedzoneMagic);
|
||||
PrintShadowByte(str, " Stack after return: ",
|
||||
kAsanStackAfterReturnMagic);
|
||||
PrintShadowByte(str, " Stack use after scope: ",
|
||||
kAsanStackUseAfterScopeMagic);
|
||||
PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
|
||||
PrintShadowByte(str, " Global init order: ",
|
||||
kAsanInitializationOrderMagic);
|
||||
PrintShadowByte(str, " Poisoned by user: ",
|
||||
kAsanUserPoisonedMemoryMagic);
|
||||
PrintShadowByte(str, " Container overflow: ",
|
||||
kAsanContiguousContainerOOBMagic);
|
||||
PrintShadowByte(str, " Array cookie: ",
|
||||
kAsanArrayCookieMagic);
|
||||
PrintShadowByte(str, " Intra object redzone: ",
|
||||
kAsanIntraObjectRedzone);
|
||||
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
|
||||
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
|
||||
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
|
||||
}
|
||||
|
||||
static void PrintShadowBytes(InternalScopedString *str, const char *before,
|
||||
u8 *bytes, u8 *guilty, uptr n) {
|
||||
Decorator d;
|
||||
if (before) str->append("%s%p:", before, bytes);
|
||||
for (uptr i = 0; i < n; i++) {
|
||||
u8 *p = bytes + i;
|
||||
const char *before =
|
||||
p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
|
||||
const char *after = p == guilty ? "]" : "";
|
||||
PrintShadowByte(str, before, *p, after);
|
||||
}
|
||||
str->append("\n");
|
||||
}
|
||||
|
||||
static void PrintShadowMemoryForAddress(uptr addr) {
|
||||
if (!AddrIsInMem(addr)) return;
|
||||
uptr shadow_addr = MemToShadow(addr);
|
||||
const uptr n_bytes_per_row = 16;
|
||||
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
|
||||
InternalScopedString str(4096 * 8);
|
||||
str.append("Shadow bytes around the buggy address:\n");
|
||||
for (int i = -5; i <= 5; i++) {
|
||||
const char *prefix = (i == 0) ? "=>" : " ";
|
||||
PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
|
||||
(u8 *)shadow_addr, n_bytes_per_row);
|
||||
}
|
||||
if (flags()->print_legend) PrintLegend(&str);
|
||||
Printf("%s", str.data());
|
||||
}
|
||||
|
||||
void ErrorGeneric::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
uptr addr = addr_description.Address();
|
||||
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
|
||||
bug_descr, (void *)addr, pc, bp, sp);
|
||||
Printf("%s", d.EndWarning());
|
||||
|
||||
char tname[128];
|
||||
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
|
||||
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
|
||||
(void *)addr, tid,
|
||||
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
|
||||
|
||||
scariness.Print();
|
||||
GET_STACK_TRACE_FATAL(pc, bp);
|
||||
stack.Print();
|
||||
|
||||
// Pass bug_descr because we have a special case for
|
||||
// initialization-order-fiasco
|
||||
addr_description.Print(bug_descr);
|
||||
if (shadow_val == kAsanContiguousContainerOOBMagic)
|
||||
PrintContainerOverflowHint();
|
||||
ReportErrorSummary(bug_descr, &stack);
|
||||
PrintShadowMemoryForAddress(addr);
|
||||
}
|
||||
|
||||
} // namespace __asan
|
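The shadow-value switch in ErrorGeneric above is easier to follow with the byte-to-bug mapping written out. A minimal sketch, assuming the usual shadow mapping from asan_mapping.h, where one shadow byte covers SHADOW_GRANULARITY (8) application bytes; ClassifyShadowValue is a hypothetical helper, not part of this merge:

// Hypothetical helper, for illustration only.
static const char *ClassifyShadowValue(uptr addr) {
  u8 v = *(u8 *)MemToShadow(addr);   // one shadow byte per 8 app bytes
  if (v == 0) return "addressable";  // all 8 bytes are valid
  if (v < SHADOW_GRANULARITY)
    return "partially addressable";  // only the first v bytes are valid
  if (v == kAsanHeapFreeMagic) return "heap-use-after-free";
  if (v == kAsanHeapLeftRedzoneMagic) return "heap-buffer-overflow";
  return "unknown-crash";            // same fallback ErrorGeneric uses
}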
libsanitizer/asan/asan_errors.h (new file)
@ -0,0 +1,376 @@
//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for error structures.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ERRORS_H
#define ASAN_ERRORS_H

#include "asan_descriptions.h"
#include "asan_scariness_score.h"
#include "sanitizer_common/sanitizer_common.h"

namespace __asan {

struct ErrorBase {
  ErrorBase() = default;
  explicit ErrorBase(u32 tid_) : tid(tid_) {}
  ScarinessScoreBase scariness;
  u32 tid;
};

struct ErrorStackOverflow : ErrorBase {
  uptr addr, pc, bp, sp;
  // ErrorStackOverflow never owns the context.
  void *context;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorStackOverflow() = default;
  ErrorStackOverflow(u32 tid, const SignalContext &sig)
      : ErrorBase(tid),
        addr(sig.addr),
        pc(sig.pc),
        bp(sig.bp),
        sp(sig.sp),
        context(sig.context) {
    scariness.Clear();
    scariness.Scare(10, "stack-overflow");
  }
  void Print();
};

struct ErrorDeadlySignal : ErrorBase {
  uptr addr, pc, bp, sp;
  // ErrorDeadlySignal never owns the context.
  void *context;
  int signo;
  SignalContext::WriteFlag write_flag;
  bool is_memory_access;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorDeadlySignal() = default;
  ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
      : ErrorBase(tid),
        addr(sig.addr),
        pc(sig.pc),
        bp(sig.bp),
        sp(sig.sp),
        context(sig.context),
        signo(signo_),
        write_flag(sig.write_flag),
        is_memory_access(sig.is_memory_access) {
    scariness.Clear();
    if (is_memory_access) {
      if (addr < GetPageSizeCached()) {
        scariness.Scare(10, "null-deref");
      } else if (addr == pc) {
        scariness.Scare(60, "wild-jump");
      } else if (write_flag == SignalContext::WRITE) {
        scariness.Scare(30, "wild-addr-write");
      } else if (write_flag == SignalContext::READ) {
        scariness.Scare(20, "wild-addr-read");
      } else {
        scariness.Scare(25, "wild-addr");
      }
    } else {
      scariness.Scare(10, "signal");
    }
  }
  void Print();
};

struct ErrorDoubleFree : ErrorBase {
  // ErrorDoubleFree doesn't own the stack trace.
  const BufferedStackTrace *second_free_stack;
  HeapAddressDescription addr_description;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorDoubleFree() = default;
  ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
      : ErrorBase(tid), second_free_stack(stack) {
    CHECK_GT(second_free_stack->size, 0);
    GetHeapAddressInformation(addr, 1, &addr_description);
    scariness.Clear();
    scariness.Scare(42, "double-free");
  }
  void Print();
};

struct ErrorNewDeleteSizeMismatch : ErrorBase {
  // ErrorNewDeleteSizeMismatch doesn't own the stack trace.
  const BufferedStackTrace *free_stack;
  HeapAddressDescription addr_description;
  uptr delete_size;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorNewDeleteSizeMismatch() = default;
  ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
                             uptr delete_size_)
      : ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
    GetHeapAddressInformation(addr, 1, &addr_description);
    scariness.Clear();
    scariness.Scare(10, "new-delete-type-mismatch");
  }
  void Print();
};

struct ErrorFreeNotMalloced : ErrorBase {
  // ErrorFreeNotMalloced doesn't own the stack trace.
  const BufferedStackTrace *free_stack;
  AddressDescription addr_description;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorFreeNotMalloced() = default;
  ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
      : ErrorBase(tid),
        free_stack(stack),
        addr_description(addr, /*shouldLockThreadRegistry=*/false) {
    scariness.Clear();
    scariness.Scare(40, "bad-free");
  }
  void Print();
};

struct ErrorAllocTypeMismatch : ErrorBase {
  // ErrorAllocTypeMismatch doesn't own the stack trace.
  const BufferedStackTrace *dealloc_stack;
  HeapAddressDescription addr_description;
  AllocType alloc_type, dealloc_type;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorAllocTypeMismatch() = default;
  ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
                         AllocType alloc_type_, AllocType dealloc_type_)
      : ErrorBase(tid),
        dealloc_stack(stack),
        alloc_type(alloc_type_),
        dealloc_type(dealloc_type_) {
    GetHeapAddressInformation(addr, 1, &addr_description);
    scariness.Clear();
    scariness.Scare(10, "alloc-dealloc-mismatch");
  };
  void Print();
};

struct ErrorMallocUsableSizeNotOwned : ErrorBase {
  // ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
  const BufferedStackTrace *stack;
  AddressDescription addr_description;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorMallocUsableSizeNotOwned() = default;
  ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
      : ErrorBase(tid),
        stack(stack_),
        addr_description(addr, /*shouldLockThreadRegistry=*/false) {
    scariness.Clear();
  }
  void Print();
};

struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
  // ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
  const BufferedStackTrace *stack;
  AddressDescription addr_description;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorSanitizerGetAllocatedSizeNotOwned() = default;
  ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
                                         uptr addr)
      : ErrorBase(tid),
        stack(stack_),
        addr_description(addr, /*shouldLockThreadRegistry=*/false) {
    scariness.Clear();
  }
  void Print();
};

struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
  // ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
  const BufferedStackTrace *stack;
  uptr length1, length2;
  AddressDescription addr1_description;
  AddressDescription addr2_description;
  const char *function;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorStringFunctionMemoryRangesOverlap() = default;
  ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
                                         uptr addr1, uptr length1_, uptr addr2,
                                         uptr length2_, const char *function_)
      : ErrorBase(tid),
        stack(stack_),
        length1(length1_),
        length2(length2_),
        addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
        addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
        function(function_) {
    char bug_type[100];
    internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
    scariness.Clear();
    scariness.Scare(10, bug_type);
  }
  void Print();
};

struct ErrorStringFunctionSizeOverflow : ErrorBase {
  // ErrorStringFunctionSizeOverflow doesn't own the stack trace.
  const BufferedStackTrace *stack;
  AddressDescription addr_description;
  uptr size;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorStringFunctionSizeOverflow() = default;
  ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
                                  uptr addr, uptr size_)
      : ErrorBase(tid),
        stack(stack_),
        addr_description(addr, /*shouldLockThreadRegistry=*/false),
        size(size_) {
    scariness.Clear();
    scariness.Scare(10, "negative-size-param");
  }
  void Print();
};

struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
  // ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
  const BufferedStackTrace *stack;
  uptr beg, end, old_mid, new_mid;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorBadParamsToAnnotateContiguousContainer() = default;
  // PS4: Do we want an AddressDescription for beg?
  ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
                                              BufferedStackTrace *stack_,
                                              uptr beg_, uptr end_,
                                              uptr old_mid_, uptr new_mid_)
      : ErrorBase(tid),
        stack(stack_),
        beg(beg_),
        end(end_),
        old_mid(old_mid_),
        new_mid(new_mid_) {}
  void Print();
};

struct ErrorODRViolation : ErrorBase {
  __asan_global global1, global2;
  u32 stack_id1, stack_id2;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorODRViolation() = default;
  ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
                    const __asan_global *g2, u32 stack_id2_)
      : ErrorBase(tid),
        global1(*g1),
        global2(*g2),
        stack_id1(stack_id1_),
        stack_id2(stack_id2_) {}
  void Print();
};

struct ErrorInvalidPointerPair : ErrorBase {
  uptr pc, bp, sp;
  AddressDescription addr1_description;
  AddressDescription addr2_description;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorInvalidPointerPair() = default;
  ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
                          uptr p2)
      : ErrorBase(tid),
        pc(pc_),
        bp(bp_),
        sp(sp_),
        addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
        addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {}
  void Print();
};

struct ErrorGeneric : ErrorBase {
  AddressDescription addr_description;
  uptr pc, bp, sp;
  uptr access_size;
  const char *bug_descr;
  bool is_write;
  u8 shadow_val;
  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  ErrorGeneric() = default;
  ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
               uptr access_size_);
  void Print();
};

// clang-format off
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
  macro(StackOverflow)                  \
  macro(DeadlySignal)                   \
  macro(DoubleFree)                     \
  macro(NewDeleteSizeMismatch)          \
  macro(FreeNotMalloced)                \
  macro(AllocTypeMismatch)              \
  macro(MallocUsableSizeNotOwned)       \
  macro(SanitizerGetAllocatedSizeNotOwned) \
  macro(StringFunctionMemoryRangesOverlap) \
  macro(StringFunctionSizeOverflow)     \
  macro(BadParamsToAnnotateContiguousContainer) \
  macro(ODRViolation)                   \
  macro(InvalidPointerPair)             \
  macro(Generic)
// clang-format on

#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
  ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
  case kErrorKind##name:                   \
    return name.Print();

enum ErrorKind {
  kErrorKindInvalid = 0,
  ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
};

struct ErrorDescription {
  ErrorKind kind;
  // We're using a tagged union because it allows us to have a trivially
  // copiable type and use the same structures as the public interface.
  //
  // We can add a wrapper around it to make it "more c++-like", but that would
  // add a lot of code and the benefit wouldn't be that big.
  union {
    ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
  };

  ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
  ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)

  bool IsValid() { return kind != kErrorKindInvalid; }
  void Print() {
    switch (kind) {
      ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
      case kErrorKindInvalid:
        CHECK(0);
    }
    CHECK(0);
  }
};

#undef ASAN_FOR_EACH_ERROR_KIND
#undef ASAN_DEFINE_ERROR_KIND
#undef ASAN_ERROR_DESCRIPTION_MEMBER
#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
#undef ASAN_ERROR_DESCRIPTION_PRINT

} // namespace __asan

#endif // ASAN_ERRORS_H
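A short usage sketch of the machinery declared above. The real consumers are the reporting entry points elsewhere in asan; ReportDoubleFreeExample is a hypothetical caller. The converting constructors generated by ASAN_ERROR_DESCRIPTION_CONSTRUCTOR let any concrete error be wrapped in ErrorDescription, and Print() dispatches on the tag:

// Hypothetical caller, for illustration only.
void ReportDoubleFreeExample(u32 tid, BufferedStackTrace *stack, uptr addr) {
  ErrorDescription descr(ErrorDoubleFree(tid, stack, addr));  // tag + payload
  CHECK(descr.IsValid());  // kind == kErrorKindDoubleFree
  descr.Print();           // switch on kind -> ErrorDoubleFree::Print()
}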
libsanitizer/asan/asan_fake_stack.cc
@ -29,7 +29,7 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (class_id <= 6) {
    for (uptr i = 0; i < (1U << class_id); i++) {
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
@ -98,7 +98,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atimic load and store (at least I was not able to make
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
@ -119,7 +119,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (1UL << stack_size_log));
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);

libsanitizer/asan/asan_fake_stack.h
@ -50,7 +50,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round robin fasion to maximize the delay between a deallocation
// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
  static const uptr kMinStackFrameSizeLog = 6;  // Min frame is 64B.
@ -67,12 +67,12 @@ class FakeStack {

  // stack_size_log is at least 15 (stack_size >= 32K).
  static uptr SizeRequiredForFlags(uptr stack_size_log) {
    return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
    return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
  }

  // Each size class occupies stack_size bytes.
  static uptr SizeRequiredForFrames(uptr stack_size_log) {
    return (1ULL << stack_size_log) * kNumberOfSizeClasses;
    return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
  }

  // Number of bytes requires for the whole object.
@ -89,20 +89,20 @@ class FakeStack {
  // and so on.
  static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
    uptr t = kNumberOfSizeClasses - 1 - class_id;
    const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
    const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
    return ((all_ones >> t) << t) << (stack_size_log - 15);
  }

  static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
    return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
    return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
  }

  // Divide n by the numbe of frames in size class.
  // Divide n by the number of frames in size class.
  static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
    return n & (NumberOfFrames(stack_size_log, class_id) - 1);
  }

  // The the pointer to the flags of the given class_id.
  // The pointer to the flags of the given class_id.
  u8 *GetFlags(uptr stack_size_log, uptr class_id) {
    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
           FlagsOffset(stack_size_log, class_id);
@ -112,7 +112,8 @@ class FakeStack {
  u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
    return reinterpret_cast<u8 *>(this) + kFlagsOffset +
           SizeRequiredForFlags(stack_size_log) +
           (1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
           (((uptr)1) << stack_size_log) * class_id +
           BytesInSizeClass(class_id) * pos;
  }

  // Allocate the fake frame.
@ -135,7 +136,7 @@ class FakeStack {

  // Number of bytes in a fake frame of this size class.
  static uptr BytesInSizeClass(uptr class_id) {
    return 1UL << (class_id + kMinStackFrameSizeLog);
    return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
  }

  // The fake frame is guaranteed to have a right redzone.
@ -157,7 +158,7 @@ class FakeStack {
  static const uptr kFlagsOffset = 4096;  // This is were the flags begin.
  // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
  COMPILER_CHECK(kNumberOfSizeClasses == 11);
  static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
  static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;

  uptr hint_position_[kNumberOfSizeClasses];
  uptr stack_size_log_;
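The repeated `1UL << n` to `((uptr)1) << n` changes above all fix the same portability hazard; a minimal sketch of the reasoning, assuming an LLP64 target such as 64-bit Windows where `unsigned long` is 32 bits:

// On LLP64, 1UL is a 32-bit value, so 1UL << stack_size_log is undefined
// behavior (or silently truncates) once stack_size_log reaches 32; uptr is
// always pointer-sized, so the shift stays in range on 64-bit targets.
uptr SizeRequiredForFramesSketch(uptr stack_size_log) {
  return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;  // safe
}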
libsanitizer/asan/asan_flags.cc
@ -114,15 +114,7 @@ void InitializeFlags() {
  ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif

  // Let activation flags override current settings. On Android they come
  // from a system property. On other platforms this is no-op.
  if (!flags()->start_deactivated) {
    char buf[100];
    GetExtraActivationFlags(buf, sizeof(buf));
    asan_parser.ParseString(buf);
  }

  SetVerbosity(common_flags()->verbosity);
  InitializeCommonFlags();

  // TODO(eugenis): dump all flags at verbosity>=2?
  if (Verbosity()) ReportUnrecognizedFlags();
@ -165,6 +157,14 @@ void InitializeFlags() {
        (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
    f->quarantine_size_mb = kDefaultQuarantineSizeMb;
  }
  if (!f->replace_str && common_flags()->intercept_strlen) {
    Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
           "Use intercept_strlen=0 to disable it.");
  }
  if (!f->replace_str && common_flags()->intercept_strchr) {
    Report("WARNING: strchr* interceptors are enabled even though "
           "replace_str=0. Use intercept_strchr=0 to disable them.");
  }
}

} // namespace __asan
libsanitizer/asan/asan_flags.inc
@ -41,10 +41,7 @@ ASAN_FLAG(
    "If set, uses custom wrappers and replacements for libc string functions "
    "to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
          "If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
ASAN_FLAG(bool, mac_ignore_invalid_free, false,
          "Ignore invalid free() calls to work around some bugs. Used on OS X "
          "only.")
          "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
          "Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
@ -78,6 +75,8 @@ ASAN_FLAG(bool, print_stats, false,
          "Print various statistics after printing an error message or if "
          "atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, print_scariness, false,
          "Print the scariness score. Experimental.")
ASAN_FLAG(bool, atexit, false,
          "If set, prints ASan exit stats even after program terminates "
          "successfully.")
@ -97,15 +96,15 @@ ASAN_FLAG(bool, poison_array_cookie, true,
          "Poison (or not) the array cookie after operator new[].")

// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// https://github.com/google/sanitizers/issues/131
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
          (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
          !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
          "Report errors on malloc/delete, new/free, new/delete[], etc.")

ASAN_FLAG(bool, new_delete_type_mismatch, true,
          "Report errors on mismatch betwen size of new and delete.")
          "Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
    bool, strict_init_order, false,
    "If true, assume that dynamic initializers can never access globals from "
@ -124,8 +123,8 @@ ASAN_FLAG(
    "The bigger the value the harder we try.")
ASAN_FLAG(
    bool, detect_container_overflow, true,
    "If true, honor the container overflow annotations. "
    "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
    "If true, honor the container overflow annotations. See "
    "https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
          "If >=2, detect violation of One-Definition-Rule (ODR); "
          "If ==1, detect ODR-violation only if the two variables "
@ -136,3 +135,5 @@ ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
          "Crash the program after printing the first error report "
          "(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
          "Use special ODR indicator symbol for ODR violation detection")
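The ASAN_FLAG entries above are X-macro invocations: the same list expands once into the Flags struct fields and once into the parser registrations. A self-contained sketch of the pattern (DemoFlags, DEMO_FLAG_LIST, and DEMO_DECLARE are illustrative names, not the real asan_flags.h machinery):

#define DEMO_FLAG_LIST(macro)         \
  macro(bool, print_scariness, false) \
  macro(bool, use_odr_indicator, false)

struct DemoFlags {
#define DEMO_DECLARE(Type, Name, Default) Type Name = Default;
  DEMO_FLAG_LIST(DEMO_DECLARE)  // expands to the two bool fields
#undef DEMO_DECLARE
};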
libsanitizer/asan/asan_globals.cc
@ -23,6 +23,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

@ -121,16 +122,68 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
  return res;
}

bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
  Global g = {};
  if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
    internal_strncpy(descr->name, g.name, descr->name_size);
    descr->region_address = g.beg;
    descr->region_size = g.size;
    descr->region_kind = "global";
    return true;
enum GlobalSymbolState {
  UNREGISTERED = 0,
  REGISTERED = 1
};

// Check ODR violation for given global G via special ODR indicator. We use
// this method in case compiler instruments global variables through their
// local aliases.
static void CheckODRViolationViaIndicator(const Global *g) {
  u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
  if (*odr_indicator == UNREGISTERED) {
    *odr_indicator = REGISTERED;
    return;
  }
  return false;
  // If *odr_indicator is DEFINED, some module have already registered
  // externally visible symbol with the same name. This is an ODR violation.
  for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
    if (g->odr_indicator == l->g->odr_indicator &&
        (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
        !IsODRViolationSuppressed(g->name))
      ReportODRViolation(g, FindRegistrationSite(g),
                         l->g, FindRegistrationSite(l->g));
  }
}

// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g) {
  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
    // This check may not be enough: if the first global is much larger
    // the entire redzone of the second global may be within the first global.
    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
      if (g->beg == l->g->beg &&
          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
          !IsODRViolationSuppressed(g->name))
        ReportODRViolation(g, FindRegistrationSite(g),
                           l->g, FindRegistrationSite(l->g));
    }
  }
}

// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
// cheaply detect ODR violation: if we try to poison an already poisoned
// global, we have ODR violation error.
// In latter case, we poison each symbol exactly once, so we use special
// indicator symbol to perform similar check.
// In either case, compiler provides a special odr_indicator field to Global
// structure, that can contain two kinds of values:
//   1) Non-zero value. In this case, odr_indicator is an address of
//      corresponding indicator variable for given global.
//   2) Zero. This means that we don't use private aliases for global variables
//      and can freely check ODR violation with the first method.
//
// This routine chooses between two different methods of ODR violation
// detection.
static inline bool UseODRIndicator(const Global *g) {
  // Use ODR indicator method iff use_odr_indicator flag is set and
  // indicator symbol address is not 0.
  return flags()->use_odr_indicator && g->odr_indicator > 0;
}

// Register a global variable.
@ -142,24 +195,24 @@ static void RegisterGlobal(const Global *g) {
    ReportGlobal(*g, "Added");
  CHECK(flags()->report_globals);
  CHECK(AddrIsInMem(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->beg));
  if (!AddrIsAlignedByGranularity(g->beg)) {
    Report("The following global variable is not properly aligned.\n");
    Report("This may happen if another global with the same name\n");
    Report("resides in another non-instrumented module.\n");
    Report("Or the global comes from a C file built w/o -fno-common.\n");
    Report("In either case this is likely an ODR violation bug,\n");
    Report("but AddressSanitizer can not provide more details.\n");
    ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g));
    CHECK(AddrIsAlignedByGranularity(g->beg));
  }
  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
  // This "ODR violation" detection is fundamentally incompatible with
  // how GCC registers globals. Disable as useless until rewritten upstream.
  if (0 && flags()->detect_odr_violation) {
  if (flags()->detect_odr_violation) {
    // Try detecting ODR (One Definition Rule) violation, i.e. the situation
    // where two globals with the same name are defined in different modules.
    if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
      // This check may not be enough: if the first global is much larger
      // the entire redzone of the second global may be within the first global.
      for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
        if (g->beg == l->g->beg &&
            (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
            !IsODRViolationSuppressed(g->name))
          ReportODRViolation(g, FindRegistrationSite(g),
                             l->g, FindRegistrationSite(l->g));
      }
    }
    if (UseODRIndicator(g))
      CheckODRViolationViaIndicator(g);
    else
      CheckODRViolationViaPoisoning(g);
  }
  if (CanPoisonMemory())
    PoisonRedZones(*g);
@ -190,6 +243,12 @@ static void UnregisterGlobal(const Global *g) {
  // We unpoison the shadow memory for the global but we do not remove it from
  // the list because that would require O(n^2) time with the current list
  // implementation. It might not be worth doing anyway.

  // Release ODR indicator.
  if (UseODRIndicator(g)) {
    u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
    *odr_indicator = UNREGISTERED;
  }
}

void StopInitOrderChecking() {
@ -207,11 +266,70 @@ void StopInitOrderChecking() {
  }
}

static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }

const char *MaybeDemangleGlobalName(const char *name) {
  // We can spoil names of globals with C linkage, so use an heuristic
  // approach to check if the name should be demangled.
  bool should_demangle = false;
  if (name[0] == '_' && name[1] == 'Z')
    should_demangle = true;
  else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
    should_demangle = true;

  return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}

// Check if the global is a zero-terminated ASCII string. If so, print it.
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
  for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
    unsigned char c = *(unsigned char *)p;
    if (c == '\0' || !IsASCII(c)) return;
  }
  if (*(char *)(g.beg + g.size - 1) != '\0') return;
  str->append("  '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
              (char *)g.beg);
}

static const char *GlobalFilename(const __asan_global &g) {
  const char *res = g.module_name;
  // Prefer the filename from source location, if is available.
  if (g.location) res = g.location->filename;
  CHECK(res);
  return res;
}

void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
  str->append("%s", GlobalFilename(g));
  if (!g.location) return;
  if (g.location->line_no) str->append(":%d", g.location->line_no);
  if (g.location->column_no) str->append(":%d", g.location->column_no);
}

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT


// Apply __asan_register_globals to all globals found in the same loaded
// executable or shared library as `flag'. The flag tracks whether globals have
// already been registered or not for this image.
void __asan_register_image_globals(uptr *flag) {
  if (*flag)
    return;
  AsanApplyToGlobals(__asan_register_globals, flag);
  *flag = 1;
}

// This mirrors __asan_register_image_globals.
void __asan_unregister_image_globals(uptr *flag) {
  if (!*flag)
    return;
  AsanApplyToGlobals(__asan_unregister_globals, flag);
  *flag = 0;
}

// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
  if (!flags()->report_globals) return;
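The indicator protocol implemented in CheckODRViolationViaIndicator above reduces to a one-byte handshake per global. A sketch under the assumption that both instrumented modules resolve the same indicator symbol (RegisterOnceViaIndicator is hypothetical):

// First registration flips the byte; a second registration of a same-named
// global from another module finds it already set and is an ODR-violation
// candidate that gets cross-checked against the global list.
static bool RegisterOnceViaIndicator(u8 *odr_indicator) {
  if (*odr_indicator == UNREGISTERED) {
    *odr_indicator = REGISTERED;
    return true;   // first module to register this global
  }
  return false;    // already registered elsewhere: report ODR violation
}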
libsanitizer/asan/asan_init_version.h
@ -17,16 +17,20 @@ extern "C" {
  // Every time the ASan ABI changes we also change the version number in the
  // __asan_init function name. Objects built with incompatible ASan ABI
  // versions will not link with run-time.
  //
  // Changes between ABI versions:
  // v1=>v2: added 'module_name' to __asan_global
  // v2=>v3: stack frame description (created by the compiler)
  //         contains the function PC as the 3-rd field (see
  //         DescribeAddressIfStack).
  // v3=>v4: added '__asan_global_source_location' to __asan_global.
  //         contains the function PC as the 3rd field (see
  //         DescribeAddressIfStack)
  // v3=>v4: added '__asan_global_source_location' to __asan_global
  // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
  //         __asan_stack_free_ functions.
  //         __asan_stack_free_ functions
  // v5=>v6: changed the name of the version check symbol
  #define __asan_version_mismatch_check __asan_version_mismatch_check_v6
  // v6=>v7: added 'odr_indicator' to __asan_global
  // v7=>v8: added '__asan_(un)register_image_globals' functions for dead
  //         stripping support on Mach-O platforms
  #define __asan_version_mismatch_check __asan_version_mismatch_check_v8
}

#endif  // ASAN_INIT_VERSION_H
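The versioned check symbol above works by making every instrumented object reference it, so mixing objects built against the v6 ABI with a v8 runtime fails at link time rather than misbehaving later. A sketch of what the compiler-emitted reference amounts to (the v8 declaration matches the #define above; emitting the reference by hand like this is only illustrative):

extern "C" void __asan_version_mismatch_check_v8();
// Instrumented modules reference the check from their init path; an object
// built against the v6 ABI would reference ..._v6 instead and fail to link.
static void AbiCheckSketch() { __asan_version_mismatch_check_v8(); }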
@ -19,6 +19,7 @@
|
||||
#include "asan_stack.h"
|
||||
#include "asan_stats.h"
|
||||
#include "asan_suppressions.h"
|
||||
#include "lsan/lsan_common.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
|
||||
#if SANITIZER_POSIX
|
||||
@ -108,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
|
||||
} while (0)
|
||||
|
||||
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
|
||||
#if ASAN_INTERCEPT_STRNLEN
|
||||
#if SANITIZER_INTERCEPT_STRNLEN
|
||||
if (REAL(strnlen)) {
|
||||
return REAL(strnlen)(s, maxlen);
|
||||
}
|
||||
@ -141,6 +142,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
||||
(void) ctx; \
|
||||
|
||||
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
|
||||
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
|
||||
ASAN_INTERCEPT_FUNC_VER(name, ver)
|
||||
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
|
||||
ASAN_WRITE_RANGE(ctx, ptr, size)
|
||||
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
|
||||
@ -176,7 +179,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
||||
} while (false)
|
||||
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
|
||||
// Strict init-order checking is dlopen-hostile:
|
||||
// https://code.google.com/p/address-sanitizer/issues/detail?id=178
|
||||
// https://github.com/google/sanitizers/issues/178
|
||||
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
|
||||
if (flags()->strict_init_order) { \
|
||||
StopInitOrderChecking(); \
|
||||
@ -193,6 +196,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
||||
} else { \
|
||||
*begin = *end = 0; \
|
||||
}
|
||||
// Asan needs custom handling of these:
|
||||
#undef SANITIZER_INTERCEPT_MEMSET
|
||||
#undef SANITIZER_INTERCEPT_MEMMOVE
|
||||
#undef SANITIZER_INTERCEPT_MEMCPY
|
||||
#include "sanitizer_common/sanitizer_common_interceptors.inc"
|
||||
|
||||
// Syscall interceptors don't have contexts, we don't support suppressions
|
||||
@ -216,6 +223,7 @@ struct ThreadStartParam {
|
||||
atomic_uintptr_t is_registered;
|
||||
};
|
||||
|
||||
#if ASAN_INTERCEPT_PTHREAD_CREATE
|
||||
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
|
||||
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
|
||||
AsanThread *t = nullptr;
|
||||
@ -226,7 +234,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
|
||||
return t->ThreadStart(GetTid(), ¶m->is_registered);
|
||||
}
|
||||
|
||||
#if ASAN_INTERCEPT_PTHREAD_CREATE
|
||||
INTERCEPTOR(int, pthread_create, void *thread,
|
||||
void *attr, void *(*start_routine)(void*), void *arg) {
|
||||
EnsureMainThreadIDIsCorrect();
|
||||
@ -240,7 +247,17 @@ INTERCEPTOR(int, pthread_create, void *thread,
|
||||
ThreadStartParam param;
|
||||
atomic_store(¶m.t, 0, memory_order_relaxed);
|
||||
atomic_store(¶m.is_registered, 0, memory_order_relaxed);
|
||||
int result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m);
|
||||
int result;
|
||||
{
|
||||
// Ignore all allocations made by pthread_create: thread stack/TLS may be
|
||||
// stored by pthread for future reuse even after thread destruction, and
|
||||
// the linked list it's stored in doesn't even hold valid pointers to the
|
||||
// objects, the latter are calculated by obscure pointer arithmetic.
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
__lsan::ScopedInterceptorDisabler disabler;
|
||||
#endif
|
||||
result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m);
|
||||
}
|
||||
if (result == 0) {
|
||||
u32 current_tid = GetCurrentTidOrInvalid();
|
||||
AsanThread *t =
|
||||
@ -269,7 +286,8 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
|
||||
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
|
||||
if (!IsHandledDeadlySignal(signum) ||
|
||||
common_flags()->allow_user_segv_handler) {
|
||||
return REAL(bsd_signal)(signum, handler);
|
||||
}
|
||||
return 0;
|
||||
@ -277,7 +295,8 @@ INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
|
||||
#endif
|
||||
|
||||
INTERCEPTOR(void*, signal, int signum, void *handler) {
|
||||
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
|
||||
if (!IsHandledDeadlySignal(signum) ||
|
||||
common_flags()->allow_user_segv_handler) {
|
||||
return REAL(signal)(signum, handler);
|
||||
}
|
||||
return nullptr;
|
||||
@ -285,7 +304,8 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {
|
||||
|
||||
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
|
||||
struct sigaction *oldact) {
|
||||
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
|
||||
if (!IsHandledDeadlySignal(signum) ||
|
||||
common_flags()->allow_user_segv_handler) {
|
||||
return REAL(sigaction)(signum, act, oldact);
|
||||
}
|
||||
return 0;
|
||||
@@ -451,25 +471,6 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
  ASAN_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(char*, strchr, const char *str, int c) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strchr);
  if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
  // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
  // used.
  if (asan_init_is_running) {
    return REAL(strchr)(str, c);
  }
  ENSURE_ASAN_INITED();
  char *result = REAL(strchr)(str, c);
  if (flags()->replace_str) {
    uptr len = REAL(strlen)(str);
    uptr bytes_read = (result ? result - str : len) + 1;
    ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
  }
  return result;
}

#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@@ -547,7 +548,6 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
  return REAL(strcpy)(to, from); // NOLINT
}

#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
@@ -562,29 +562,28 @@ INTERCEPTOR(char*, strdup, const char *s) {
  REAL(memcpy)(new_mem, s, length + 1);
  return reinterpret_cast<char*>(new_mem);
}
#endif

INTERCEPTOR(SIZE_T, strlen, const char *s) {
#if ASAN_INTERCEPT___STRDUP
INTERCEPTOR(char*, __strdup, const char *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strlen);
  if (UNLIKELY(!asan_inited)) return internal_strlen(s);
  // strlen is called from malloc_default_purgeable_zone()
  // in __asan::ReplaceSystemAlloc() on Mac.
  if (asan_init_is_running) {
    return REAL(strlen)(s);
  }
  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
  if (UNLIKELY(!asan_inited)) return internal_strdup(s);
  ENSURE_ASAN_INITED();
  SIZE_T length = REAL(strlen)(s);
  uptr length = REAL(strlen)(s);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(ctx, s, length + 1);
  }
  return length;
  GET_STACK_TRACE_MALLOC;
  void *new_mem = asan_malloc(length + 1, &stack);
  REAL(memcpy)(new_mem, s, length + 1);
  return reinterpret_cast<char*>(new_mem);
}
#endif // ASAN_INTERCEPT___STRDUP
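
Both strdup variants follow the same recipe: measure with the real strlen, report a (length + 1)-byte read so the shadow is checked, then copy through asan_malloc so the duplicate gets redzones. A small standalone illustration of what that read check catches (user code, not runtime code):

  // Build with -fsanitize=address. strdup's interceptor reads len+1 bytes of
  // the source, so duplicating an unterminated buffer is reported at the call.
  #include <cstring>
  #include <cstdlib>

  int main() {
    char *s = static_cast<char *>(malloc(4));
    memcpy(s, "abcd", 4);  // no terminating NUL inside the 4-byte allocation
    char *d = strdup(s);   // heap-buffer-overflow: strlen runs past s[3]
    free(d);
    free(s);
  }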

INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
  SIZE_T length = REAL(wcslen)(s);
  SIZE_T length = internal_wcslen(s);
  if (!asan_init_is_running) {
    ENSURE_ASAN_INITED();
    ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
@@ -605,19 +604,6 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
  return REAL(strncpy)(to, from, size);
}

#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
  ENSURE_ASAN_INITED();
  uptr length = REAL(strnlen)(s, maxlen);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
  }
  return length;
}
#endif // ASAN_INTERCEPT_STRNLEN

INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
            char **endptr, int base) {
  void *ctx;
@@ -700,12 +686,12 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL

#if ASAN_INTERCEPT___CXA_ATEXIT
static void AtCxaAtexit(void *unused) {
  (void)unused;
  StopInitOrderChecking();
}

#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
            void *dso_handle) {
#if SANITIZER_MAC
@@ -732,7 +718,7 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
  static bool was_called_once;
  CHECK(was_called_once == false);
  CHECK(!was_called_once);
  was_called_once = true;
  InitializeCommonInterceptors();

@@ -740,22 +726,22 @@ void InitializeAsanInterceptors() {
  ASAN_INTERCEPT_FUNC(memmove);
  ASAN_INTERCEPT_FUNC(memset);
  if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
    // In asan, REAL(memmove) is not used, but it is used in msan.
    ASAN_INTERCEPT_FUNC(memcpy);
  } else {
    ASSIGN_REAL(memcpy, memmove);
  }
  CHECK(REAL(memcpy));

  // Intercept str* functions.
  ASAN_INTERCEPT_FUNC(strcat); // NOLINT
  ASAN_INTERCEPT_FUNC(strchr);
  ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
  ASAN_INTERCEPT_FUNC(strlen);
  ASAN_INTERCEPT_FUNC(wcslen);
  ASAN_INTERCEPT_FUNC(strncat);
  ASAN_INTERCEPT_FUNC(strncpy);
#if ASAN_INTERCEPT_STRDUP
  ASAN_INTERCEPT_FUNC(strdup);
#endif
#if ASAN_INTERCEPT_STRNLEN
  ASAN_INTERCEPT_FUNC(strnlen);
#if ASAN_INTERCEPT___STRDUP
  ASAN_INTERCEPT_FUNC(__strdup);
#endif
#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
  ASAN_INTERCEPT_FUNC(index);
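
A detail that is easy to miss in the memcpy/memmove registration: where libc resolves both names to one routine, the table simply aliases REAL(memcpy) to REAL(memmove) via ASSIGN_REAL instead of intercepting twice. The shape of that decision, with resolve_real() and the predicate as assumed stand-ins for the real macros:

  #include <cstddef>

  using mem_fn = void *(*)(void *, const void *, size_t);

  mem_fn resolve_real(const char *name);             // assumed resolver
  bool platform_has_different_memcpy_and_memmove();  // assumed predicate

  static mem_fn real_memcpy, real_memmove;

  void init_mem_interceptors() {
    real_memmove = resolve_real("memmove");
    real_memcpy = platform_has_different_memcpy_and_memmove()
                      ? resolve_real("memcpy")
                      : real_memmove;  // one implementation serves both names
  }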

@@ -21,14 +21,12 @@
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_FORK 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_FORK 0
@@ -40,12 +38,6 @@
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif

#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
@@ -78,6 +70,12 @@
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT___STRDUP 1
#else
# define ASAN_INTERCEPT___STRDUP 0
#endif

DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)

@@ -21,6 +21,8 @@
#include "asan_init_version.h"

using __sanitizer::uptr;
using __sanitizer::u64;
using __sanitizer::u32;

extern "C" {
  // This function should be called at the very beginning of the process,
@@ -52,8 +54,17 @@ extern "C" {
    uptr has_dynamic_init;   // Non-zero if the global has dynamic initializer.
    __asan_global_source_location *location;  // Source location of a global,
                                              // or NULL if it is unknown.
    uptr odr_indicator;      // The address of the ODR indicator symbol.
  };

  // These functions can be called on some platforms to find globals in the same
  // loaded image as `flag' and apply __asan_(un)register_globals to them,
  // filtering out redundant calls.
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_register_image_globals(uptr *flag);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_unregister_image_globals(uptr *flag);

  // These two functions should be called by the instrumented code.
  // 'globals' is an array of structures describing 'n' globals.
  SANITIZER_INTERFACE_ATTRIBUTE
@@ -68,6 +79,20 @@ extern "C" {
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_after_dynamic_init();

  // Sets bytes of the given range of the shadow memory into specific value.
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_shadow_00(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_shadow_f1(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_shadow_f2(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_shadow_f3(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_shadow_f5(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_shadow_f8(uptr addr, uptr size);

  // These two functions are used by instrumented code in the
  // use-after-scope mode. They mark memory for local variables as
  // unaddressable when they leave scope and addressable before the
@@ -145,6 +170,9 @@ extern "C" {
  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
  /* OPTIONAL */ const char* __asan_default_options();

  SANITIZER_INTERFACE_ATTRIBUTE
  extern uptr __asan_shadow_memory_dynamic_address;

  // Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
  SANITIZER_INTERFACE_ATTRIBUTE
  extern int __asan_option_detect_stack_use_after_return;
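
Several of the symbols above are weak hooks meant to be supplied by the application. The OPTIONAL __asan_default_options(), for example, lets a binary bake in its preferred ASAN_OPTIONS; this is documented ASan usage rather than something new in this merge:

  // Linked into an ASan-instrumented binary; the runtime queries it at startup.
  extern "C" const char *__asan_default_options() {
    return "detect_stack_use_after_return=1:malloc_context_size=30";
  }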
@@ -34,9 +34,9 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
#if SANITIZER_WORDSIZE == 32
# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
#  define ASAN_LOW_MEMORY 1
#else
# else
#  define ASAN_LOW_MEMORY 0
# endif
#endif
@@ -60,6 +60,12 @@ using __sanitizer::StackTrace;

void AsanInitFromRtl();

// asan_win.cc
void InitializePlatformExceptionHandlers();

// asan_win.cc / asan_posix.cc
const char *DescribeSignalOrException(int signo);

// asan_rtl.cc
void NORETURN ShowStatsAndAbort();

@@ -71,10 +77,15 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();

// Support function for __asan_(un)register_image_globals. Searches for the
// loaded image containing `needle' and then enumerates all global metadata
// structures declared in that image, applying `op' (e.g.,
// __asan_(un)register_globals) to them.
typedef void (*globals_op_fptr)(__asan_global *, uptr);
void AsanApplyToGlobals(globals_op_fptr op, const void *needle);

void AsanOnDeadlySignal(int, void *siginfo, void *context);

void DisableReexec();
void MaybeReexec();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void StopInitOrderChecking();

@@ -95,16 +106,24 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
    (PlatformHasDifferentMemcpyAndMemmove())
#elif SANITIZER_WINDOWS64
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // SANITIZER_MAC

// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
#define ASAN_FREE_HOOK(ptr) \
  if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
#define ASAN_MALLOC_HOOK(ptr, size)                                   \
  do {                                                                \
    if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
    RunMallocHooks(ptr, size);                                        \
  } while (false)
#define ASAN_FREE_HOOK(ptr)                                 \
  do {                                                      \
    if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
    RunFreeHooks(ptr);                                      \
  } while (false)
#define ASAN_ON_ERROR() \
  if (&__asan_on_error) __asan_on_error()

@@ -112,15 +131,12 @@ extern int asan_inited;
// Used to avoid infinite recursion in __asan_init().
extern bool asan_init_is_running;
extern void (*death_callback)(void);

// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;
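
Note the shape change in ASAN_MALLOC_HOOK/ASAN_FREE_HOOK: besides adding the RunMallocHooks/RunFreeHooks calls, the bodies are now wrapped in do { ... } while (false). That wrapper is the standard fix for statement-like macros; a bare if-macro misparses under if/else, as this self-contained demo shows:

  #include <cstdio>

  static void (*hook)(void *) = nullptr;

  #define BAD_HOOK(p)  if (hook) hook(p)
  #define GOOD_HOOK(p) do { if (hook) hook(p); } while (false)

  void demo(bool cond, void *p) {
    if (cond)
      BAD_HOOK(p);           // dangling else: binds to the macro's own 'if',
    else                     // so this branch runs when cond && !hook -- wrong
      puts("!cond branch");
    if (cond)
      GOOD_HOOK(p);          // parses as intended
    else
      puts("!cond branch");
  }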
@@ -67,20 +67,17 @@ asan_rt_version_t __asan_rt_version;
namespace __asan {

void InitializePlatformInterceptors() {}

void DisableReexec() {
  // No need to re-exec on Linux.
}

void MaybeReexec() {
  // No need to re-exec on Linux.
}
void InitializePlatformExceptionHandlers() {}

void *AsanDoesNotSupportStaticLinkage() {
  // This will fail to link with -static.
  return &_DYNAMIC;  // defined in link.h
}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
  UNIMPLEMENTED();
}

#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
@@ -22,18 +22,11 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"

#if !SANITIZER_IOS
#include <crt_externs.h>  // for _NSGetArgv and _NSGetEnviron
#else
extern "C" {
extern char ***_NSGetArgv(void);
}
#endif

#include <dlfcn.h>  // for dladdr()
#include <dlfcn.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach-o/loader.h>
#include <pthread.h>
#include <stdlib.h>  // for free()
@@ -43,193 +36,26 @@ extern "C" {
#include <sys/ucontext.h>
#include <unistd.h>

// from <crt_externs.h>, but we don't have that file on iOS
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetEnviron(void);
}

namespace __asan {

void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}

bool PlatformHasDifferentMemcpyAndMemmove() {
  // On OS X 10.7 memcpy() and memmove() are both resolved
  // into memmove$VARIANT$sse42.
  // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
  // See also https://github.com/google/sanitizers/issues/34.
  // TODO(glider): need to check dynamically that memcpy() and memmove() are
  // actually the same function.
  return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
}

extern "C"
void __asan_init();

static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;

// Change the value of the env var |name|, leaking the original value.
// If |name_value| is NULL, the variable is deleted from the environment,
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
void LeakyResetEnv(const char *name, const char *name_value) {
  char **env = GetEnviron();
  uptr name_len = internal_strlen(name);
  while (*env != 0) {
    uptr len = internal_strlen(*env);
    if (len > name_len) {
      const char *p = *env;
      if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
        // Match.
        if (name_value) {
          // Replace the old value with the new one.
          *env = const_cast<char*>(name_value);
        } else {
          // Shift the subsequent pointers back.
          char **del = env;
          do {
            del[0] = del[1];
          } while (*del++);
        }
      }
    }
    env++;
  }
}

static bool reexec_disabled = false;

void DisableReexec() {
  reexec_disabled = true;
}

extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
static const double kMinDyldVersionWithAutoInterposition = 360.0;

bool DyldNeedsEnvVariable() {
  // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
  // still may want to use them on older systems. On older Darwin platforms,
  // dyld doesn't export the dyldVersionNumber symbol and we simply return true.
  if (!&dyldVersionNumber) return true;
  // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
  // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
  // GetMacosVersion() doesn't work for the simulator. Let's instead check
  // `dyldVersionNumber`, which is exported by dyld, against a known version
  // number from the first OS release where this appeared.
  return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
}

void MaybeReexec() {
  if (reexec_disabled) return;

  // Make sure the dynamic ASan runtime library is preloaded so that the
  // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
  // ourselves.
  Dl_info info;
  CHECK(dladdr((void*)((uptr)__asan_init), &info));
  char *dyld_insert_libraries =
      const_cast<char*>(GetEnv(kDyldInsertLibraries));
  uptr old_env_len = dyld_insert_libraries ?
      internal_strlen(dyld_insert_libraries) : 0;
  uptr fname_len = internal_strlen(info.dli_fname);
  const char *dylib_name = StripModuleName(info.dli_fname);
  uptr dylib_name_len = internal_strlen(dylib_name);

  bool lib_is_in_env =
      dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name);
  if (DyldNeedsEnvVariable() && !lib_is_in_env) {
    // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
    // library.
    char program_name[1024];
    uint32_t buf_size = sizeof(program_name);
    _NSGetExecutablePath(program_name, &buf_size);
    char *new_env = const_cast<char*>(info.dli_fname);
    if (dyld_insert_libraries) {
      // Append the runtime dylib name to the existing value of
      // DYLD_INSERT_LIBRARIES.
      new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
      internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
      new_env[old_env_len] = ':';
      // Copy fname_len and add a trailing zero.
      internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
                       fname_len + 1);
      // Ok to use setenv() since the wrappers don't depend on the value of
      // asan_inited.
      setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
    } else {
      // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
      setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
    }
    VReport(1, "exec()-ing the program with\n");
    VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
    VReport(1, "to enable ASan wrappers.\n");
    execv(program_name, *_NSGetArgv());

    // We get here only if execv() failed.
    Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
           "which is required for ASan to work. ASan tried to set the "
           "environment variable and re-execute itself, but execv() failed, "
           "possibly because of sandbox restrictions. Make sure to launch the "
           "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
    CHECK("execv failed" && 0);
  }

  if (!lib_is_in_env)
    return;

  // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's
  // remove the dylib from the environment variable, because interceptors are
  // installed and we don't want our children to inherit the variable.

  uptr env_name_len = internal_strlen(kDyldInsertLibraries);
  // Allocate memory to hold the previous env var name, its value, the '='
  // sign and the '\0' char.
  char *new_env = (char*)allocator_for_env.Allocate(
      old_env_len + 2 + env_name_len);
  CHECK(new_env);
  internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
  internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
  new_env[env_name_len] = '=';
  char *new_env_pos = new_env + env_name_len + 1;

  // Iterate over colon-separated pieces of |dyld_insert_libraries|.
  char *piece_start = dyld_insert_libraries;
  char *piece_end = NULL;
  char *old_env_end = dyld_insert_libraries + old_env_len;
  do {
    if (piece_start[0] == ':') piece_start++;
    piece_end = REAL(strchr)(piece_start, ':');
    if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
    if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
    uptr piece_len = piece_end - piece_start;

    char *filename_start =
        (char *)internal_memrchr(piece_start, '/', piece_len);
    uptr filename_len = piece_len;
    if (filename_start) {
      filename_start += 1;
      filename_len = piece_len - (filename_start - piece_start);
    } else {
      filename_start = piece_start;
    }

    // If the current piece isn't the runtime library name,
    // append it to new_env.
    if ((dylib_name_len != filename_len) ||
        (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
      if (new_env_pos != new_env + env_name_len + 1) {
        new_env_pos[0] = ':';
        new_env_pos++;
      }
      internal_strncpy(new_env_pos, piece_start, piece_len);
      new_env_pos += piece_len;
    }
    // Move on to the next piece.
    piece_start = piece_end;
  } while (piece_start < old_env_end);

  // Can't use setenv() here, because it requires the allocator to be
  // initialized.
  // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
  // a separate function called after InitializeAllocator().
  if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
  LeakyResetEnv(kDyldInsertLibraries, new_env);
}
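
The second half of MaybeReexec is a small string algorithm: walk the colon-separated DYLD_INSERT_LIBRARIES value, drop each piece whose basename matches the ASan dylib, and rebuild the variable for child processes. A portable restatement using std::string for clarity (the runtime itself cannot, since its allocator may not be initialized yet):

  #include <string>

  // Drop every colon-separated piece whose basename equals dylib_name.
  std::string filter_insert_libraries(const std::string &env,
                                      const std::string &dylib_name) {
    std::string out;
    size_t pos = 0;
    while (pos <= env.size()) {
      size_t colon = env.find(':', pos);
      if (colon == std::string::npos) colon = env.size();
      std::string piece = env.substr(pos, colon - pos);
      size_t slash = piece.rfind('/');
      std::string base =
          slash == std::string::npos ? piece : piece.substr(slash + 1);
      if (!piece.empty() && base != dylib_name) {
        if (!out.empty()) out += ':';
        out += piece;
      }
      pos = colon + 1;
    }
    return out;
  }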

// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
  return 0;
@@ -241,6 +67,30 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
  // Find the Mach-O header for the image containing the needle
  Dl_info info;
  int err = dladdr(needle, &info);
  if (err == 0) return;

#if __LP64__
  const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase;
#else
  const struct mach_header *mh = (struct mach_header *)info.dli_fbase;
#endif

  // Look up the __asan_globals section in that image and register its globals
  unsigned long size = 0;
  __asan_global *globals = (__asan_global *)getsectiondata(
      mh,
      "__DATA", "__asan_globals",
      &size);

  if (!globals) return;
  if (size % sizeof(__asan_global) != 0) return;
  op(globals, size / sizeof(__asan_global));
}

void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
  UNIMPLEMENTED();
}
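
AsanApplyToGlobals is the Mac counterpart of __asan_register_image_globals: the compiler emits per-global metadata into a __DATA,__asan_globals section and the runtime enumerates it per image. The same getsectiondata() pattern works from ordinary code for any section; a macOS-only sketch inspecting the main executable:

  #include <mach-o/dyld.h>
  #include <mach-o/getsect.h>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // Header of the main executable (image index 0); LP64 cast as above.
    auto *mh = reinterpret_cast<const struct mach_header_64 *>(
        _dyld_get_image_header(0));
    unsigned long size = 0;
    uint8_t *data = getsectiondata(mh, "__TEXT", "__text", &size);
    if (data)
      printf("__TEXT,__text: %lu bytes at %p\n", size, (void *)data);
  }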
@@ -76,7 +76,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
    uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
    uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
    void *new_ptr = asan_malloc(size, &stack);
    void *new_ptr;
    if (UNLIKELY(!asan_inited)) {
      new_ptr = AllocateFromLocalPool(size);
    } else {
      copy_size = size;
      new_ptr = asan_malloc(copy_size, &stack);
    }
    internal_memcpy(new_ptr, ptr, copy_size);
    return new_ptr;
  }
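
Context for the realloc change: dlsym() itself calls calloc() while interceptors are being resolved, i.e. before __asan_init runs, so those first allocations come from a static pool, and realloc has to recognize pool pointers and copy them out (clamping the copy to what the pool actually held). A simplified sketch of the pattern; the pool size, names and helpers are illustrative, not the runtime's:

  #include <cstring>
  #include <cstddef>

  static char dlsym_pool[1024];
  void *real_malloc(size_t);        // assumed: the non-pool allocation path
  void *real_realloc(void *, size_t);

  static bool in_dlsym_pool(const void *p) {
    return p >= dlsym_pool && p < dlsym_pool + sizeof(dlsym_pool);
  }

  void *intercepted_realloc(void *p, size_t size) {
    if (in_dlsym_pool(p)) {
      size_t avail = sizeof(dlsym_pool) - ((char *)p - dlsym_pool);
      void *np = real_malloc(size);
      memcpy(np, p, size < avail ? size : avail);  // clamp like the runtime
      return np;                                   // pool memory is never freed
    }
    return real_realloc(p, size);
  }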
@@ -96,7 +102,7 @@ INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
  GET_STACK_TRACE_MALLOC;
  void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
  DTLS_on_libc_memalign(res, size * boundary);
  DTLS_on_libc_memalign(res, size);
  return res;
}

@@ -52,10 +52,6 @@ using namespace __asan;
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
  GET_STACK_TRACE_FREE; \
  ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
#define COMMON_MALLOC_IGNORE_INVALID_FREE flags()->mac_ignore_invalid_free
#define COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name) \
  GET_STACK_TRACE_FREE; \
  WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
#define COMMON_MALLOC_NAMESPACE __asan

#include "sanitizer_common/sanitizer_malloc_mac.inc"
@@ -12,6 +12,8 @@

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#include "asan_allocator.h"
#include "asan_interceptors.h"
@@ -46,6 +48,11 @@ void _free_dbg(void *ptr, int) {
  free(ptr);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void _free_base(void *ptr) {
  free(ptr);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void cfree(void *ptr) {
  CHECK(!"cfree() should not be used on Windows");
@@ -57,6 +64,11 @@ void *malloc(size_t size) {
  return asan_malloc(size, &stack);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_base(size_t size) {
  return malloc(size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_dbg(size_t size, int, const char *, int) {
  return malloc(size);
@@ -68,6 +80,11 @@ void *calloc(size_t nmemb, size_t size) {
  return asan_calloc(nmemb, size, &stack);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_base(size_t nmemb, size_t size) {
  return calloc(nmemb, size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
  return calloc(nmemb, size);
@@ -90,6 +107,11 @@ void *_realloc_dbg(void *ptr, size_t size, int) {
  return 0;
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_base(void *ptr, size_t size) {
  return realloc(ptr, size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc(void *p, size_t n, size_t elem_size) {
  if (!p)
@@ -101,7 +123,12 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
}

ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(void *ptr) {
void *_recalloc_base(void *p, size_t n, size_t elem_size) {
  return _recalloc(p, n, elem_size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) {
  GET_CURRENT_PC_BP_SP;
  (void)sp;
  return asan_malloc_usable_size(ptr, pc, bp);
@@ -137,38 +164,90 @@ int _CrtSetReportMode(int, int) {
}
}  // extern "C"

INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
                   SIZE_T dwBytes) {
  GET_STACK_TRACE_MALLOC;
  void *p = asan_malloc(dwBytes, &stack);
  // Reading MSDN suggests that the *entire* usable allocation is zeroed out.
  // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY.
  // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
  if (dwFlags == HEAP_ZERO_MEMORY)
    internal_memset(p, 0, asan_mz_size(p));
  else
    CHECK(dwFlags == 0 && "unsupported heap flags");
  return p;
}

INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) {
  CHECK(dwFlags == 0 && "unsupported heap flags");
  GET_STACK_TRACE_FREE;
  asan_free(lpMem, &stack, FROM_MALLOC);
  return true;
}

INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags,
                   LPVOID lpMem, SIZE_T dwBytes) {
  GET_STACK_TRACE_MALLOC;
  // Realloc should never reallocate in place.
  if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY)
    return nullptr;
  CHECK(dwFlags == 0 && "unsupported heap flags");
  return asan_realloc(lpMem, dwBytes, &stack);
}

INTERCEPTOR_WINAPI(SIZE_T, HeapSize, HANDLE hHeap, DWORD dwFlags,
                   LPCVOID lpMem) {
  CHECK(dwFlags == 0 && "unsupported heap flags");
  GET_CURRENT_PC_BP_SP;
  (void)sp;
  return asan_malloc_usable_size(lpMem, pc, bp);
}
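
The zeroing comment in HeapAlloc matters for HeapReAlloc correctness: with HEAP_ZERO_MEMORY, Win32 zeroes the entire usable block, so the grown tail of a later zero-extending reallocation also reads as zero, which is what the interceptor reproduces with internal_memset over asan_mz_size(p). A Windows-only illustration of the contract being mimicked:

  #include <windows.h>
  #include <cstdio>

  int main() {
    HANDLE heap = GetProcessHeap();
    char *p = (char *)HeapAlloc(heap, HEAP_ZERO_MEMORY, 16);
    p = (char *)HeapReAlloc(heap, HEAP_ZERO_MEMORY, p, 256);
    printf("p[200] == %d\n", p[200]);  // must print 0: the grown tail is zeroed
    HeapFree(heap, 0, p);
  }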

namespace __asan {

static void TryToOverrideFunction(const char *fname, uptr new_func) {
  // Failure here is not fatal. The CRT may not be present, and different CRT
  // versions use different symbols.
  if (!__interception::OverrideFunction(fname, new_func))
    VPrintf(2, "Failed to override function %s\n", fname);
}

void ReplaceSystemMalloc() {
#if defined(ASAN_DYNAMIC)
  // We don't check the result because CRT might not be used in the process.
  __interception::OverrideFunction("free", (uptr)free);
  __interception::OverrideFunction("malloc", (uptr)malloc);
  __interception::OverrideFunction("_malloc_crt", (uptr)malloc);
  __interception::OverrideFunction("calloc", (uptr)calloc);
  __interception::OverrideFunction("_calloc_crt", (uptr)calloc);
  __interception::OverrideFunction("realloc", (uptr)realloc);
  __interception::OverrideFunction("_realloc_crt", (uptr)realloc);
  __interception::OverrideFunction("_recalloc", (uptr)_recalloc);
  __interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc);
  __interception::OverrideFunction("_msize", (uptr)_msize);
  __interception::OverrideFunction("_expand", (uptr)_expand);
  TryToOverrideFunction("free", (uptr)free);
  TryToOverrideFunction("_free_base", (uptr)free);
  TryToOverrideFunction("malloc", (uptr)malloc);
  TryToOverrideFunction("_malloc_base", (uptr)malloc);
  TryToOverrideFunction("_malloc_crt", (uptr)malloc);
  TryToOverrideFunction("calloc", (uptr)calloc);
  TryToOverrideFunction("_calloc_base", (uptr)calloc);
  TryToOverrideFunction("_calloc_crt", (uptr)calloc);
  TryToOverrideFunction("realloc", (uptr)realloc);
  TryToOverrideFunction("_realloc_base", (uptr)realloc);
  TryToOverrideFunction("_realloc_crt", (uptr)realloc);
  TryToOverrideFunction("_recalloc", (uptr)_recalloc);
  TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
  TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
  TryToOverrideFunction("_msize", (uptr)_msize);
  TryToOverrideFunction("_expand", (uptr)_expand);
  TryToOverrideFunction("_expand_base", (uptr)_expand);

  // Override different versions of 'operator new' and 'operator delete'.
  // No need to override the nothrow versions as they just wrap the throw
  // versions.
  // FIXME: Unfortunately, MSVC miscompiles the statements that take the
  // addresses of the array versions of these operators,
  // see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992
  // We might want to try to work around this by [inline] assembly or compiling
  // parts of the RTL with Clang.
  void *(*op_new)(size_t sz) = operator new;
  void (*op_delete)(void *p) = operator delete;
  void *(*op_array_new)(size_t sz) = operator new[];
  void (*op_array_delete)(void *p) = operator delete[];
  __interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new);
  __interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete);
  __interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new);
  __interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete);
  // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG, which
  // enable cross-module inlining. This means our _malloc_base hook won't catch
  // all CRT allocations. This code here patches the import table of
  // ucrtbase.dll so that all attempts to use the lower-level win32 heap
  // allocation API will be directed to ASan's heap. We don't currently
  // intercept all calls to HeapAlloc. If we did, we would have to check on
  // HeapFree whether the pointer came from ASan or from the system.
#define INTERCEPT_UCRT_FUNCTION(func)                                         \
  if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll",                           \
                                    "api-ms-win-core-heap-l1-1-0.dll", func)) \
    VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func);
  INTERCEPT_UCRT_FUNCTION(HeapAlloc);
  INTERCEPT_UCRT_FUNCTION(HeapFree);
  INTERCEPT_UCRT_FUNCTION(HeapReAlloc);
  INTERCEPT_UCRT_FUNCTION(HeapSize);
#undef INTERCEPT_UCRT_FUNCTION
#endif
}
}  // namespace __asan
@@ -15,7 +15,7 @@
#include "asan_internal.h"

// The full explanation of the memory mapping could be found here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000:
// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem    ||
@@ -85,6 +85,20 @@
// || `[0x08000000000, 0x08fffffffff]` || lowshadow  ||
// || `[0x00000000000, 0x07fffffffff]` || lowmem     ||
//
// Default Linux/S390 mapping:
// || `[0x30000000, 0x7fffffff]` || HighMem    ||
// || `[0x26000000, 0x2fffffff]` || HighShadow ||
// || `[0x24000000, 0x25ffffff]` || ShadowGap  ||
// || `[0x20000000, 0x23ffffff]` || LowShadow  ||
// || `[0x00000000, 0x1fffffff]` || LowMem     ||
//
// Default Linux/SystemZ mapping:
// || `[0x14000000000000, 0x1fffffffffffff]` || HighMem    ||
// || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow ||
// || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap  ||
// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow  ||
// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem     ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem    ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -109,17 +123,19 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||

static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;  // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000;  // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30;  // 0x40000000
static const u64 kIosShadowOffset64 = 0x130000000;
static const u64 kIosShadowOffset64 = 0x120200000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000
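
Every offset in this table plugs into the same translation: shadow = (addr >> kDefaultShadowScale) + SHADOW_OFFSET, one shadow byte per 8-byte granule. A worked example with the Linux/x86_64 defaults named above (scale 3, offset 0x7fff8000):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kShadowScale  = 3;           // kDefaultShadowScale
    const uint64_t kShadowOffset = 0x7fff8000;  // kDefaultShort64bitShadowOffset
    uint64_t addr   = 0x602000000010ULL;        // a typical heap address
    uint64_t shadow = (addr >> kShadowScale) + kShadowOffset;
    printf("shadow(0x%llx) = 0x%llx\n",
           (unsigned long long)addr, (unsigned long long)shadow);
    // prints: shadow(0x602000000010) = 0xc047fff8002
  }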
@@ -136,28 +152,36 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000
#  define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_WINDOWS
#  define SHADOW_OFFSET kWindowsShadowOffset32
# elif SANITIZER_IOSSIM
#  define SHADOW_OFFSET kIosSimShadowOffset32
# elif SANITIZER_IOS
#  define SHADOW_OFFSET kIosShadowOffset32
#  if SANITIZER_IOSSIM
#   define SHADOW_OFFSET kIosSimShadowOffset32
#  else
#   define SHADOW_OFFSET kIosShadowOffset32
#  endif
# else
#  define SHADOW_OFFSET kDefaultShadowOffset32
# endif
#else
# if defined(__aarch64__)
# if SANITIZER_IOS
#  if SANITIZER_IOSSIM
#   define SHADOW_OFFSET kIosSimShadowOffset64
#  else
#   define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
#  endif
# elif defined(__aarch64__)
#  define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
#  define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif defined(__s390x__)
#  define SHADOW_OFFSET kSystemZ_ShadowOffset64
# elif SANITIZER_FREEBSD
#  define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_MAC
#  define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
#  define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_IOSSIM
#  define SHADOW_OFFSET kIosSimShadowOffset64
# elif SANITIZER_IOS
#  define SHADOW_OFFSET kIosShadowOffset64
# elif SANITIZER_WINDOWS64
#  define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
#  define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
@@ -243,9 +267,25 @@ static inline bool AddrIsInMidMem(uptr a) {
  return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}

static inline bool AddrIsInShadowGap(uptr a) {
  PROFILE_ASAN_MAPPING();
  if (kMidMemBeg) {
    if (a <= kShadowGapEnd)
      return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
    return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
           (a >= kShadowGap3Beg && a <= kShadowGap3End);
  }
  // In zero-based shadow mode we treat addresses near zero as addresses
  // in shadow gap as well.
  if (SHADOW_OFFSET == 0)
    return a <= kShadowGapEnd;
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}

static inline bool AddrIsInMem(uptr a) {
  PROFILE_ASAN_MAPPING();
  return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
  return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
         (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}

static inline uptr MemToShadow(uptr p) {
@@ -269,21 +309,6 @@ static inline bool AddrIsInShadow(uptr a) {
  return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}

static inline bool AddrIsInShadowGap(uptr a) {
  PROFILE_ASAN_MAPPING();
  if (kMidMemBeg) {
    if (a <= kShadowGapEnd)
      return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
    return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
           (a >= kShadowGap3Beg && a <= kShadowGap3End);
  }
  // In zero-based shadow mode we treat addresses near zero as addresses
  // in shadow gap as well.
  if (SHADOW_OFFSET == 0)
    return a <= kShadowGapEnd;
  return a >= kShadowGapBeg && a <= kShadowGapEnd;
}

static inline bool AddrIsAlignedByGranularity(uptr a) {
  PROFILE_ASAN_MAPPING();
  return (a & (SHADOW_GRANULARITY - 1)) == 0;
libsanitizer/asan/asan_memory_profile.cc (new file, 98 lines)
@@ -0,0 +1,98 @@
//===-- asan_memory_profile.cc ----------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file implements __sanitizer_print_memory_profile.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "lsan/lsan_common.h"
#include "asan/asan_allocator.h"

#if CAN_SANITIZE_LEAKS

namespace __asan {

struct AllocationSite {
  u32 id;
  uptr total_size;
  uptr count;
};

class HeapProfile {
 public:
  HeapProfile() : allocations_(1024) {}
  void Insert(u32 id, uptr size) {
    total_allocated_ += size;
    total_count_++;
    // Linear lookup will be good enough for most cases (although not all).
    for (uptr i = 0; i < allocations_.size(); i++) {
      if (allocations_[i].id == id) {
        allocations_[i].total_size += size;
        allocations_[i].count++;
        return;
      }
    }
    allocations_.push_back({id, size, 1});
  }

  void Print(uptr top_percent) {
    InternalSort(&allocations_, allocations_.size(),
                 [](const AllocationSite &a, const AllocationSite &b) {
                   return a.total_size > b.total_size;
                 });
    CHECK(total_allocated_);
    uptr total_shown = 0;
    Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
           "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
    for (uptr i = 0; i < allocations_.size(); i++) {
      auto &a = allocations_[i];
      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
             a.total_size * 100 / total_allocated_, a.count);
      StackDepotGet(a.id).Print();
      total_shown += a.total_size;
      if (total_shown * 100 / total_allocated_ > top_percent)
        break;
    }
  }

 private:
  uptr total_allocated_ = 0;
  uptr total_count_ = 0;
  InternalMmapVector<AllocationSite> allocations_;
};

static void ChunkCallback(uptr chunk, void *arg) {
  HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
  AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
  if (!cv.IsAllocated()) return;
  u32 id = cv.GetAllocStackId();
  if (!id) return;
  hp->Insert(id, cv.UsedSize());
}

static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
                            void *argument) {
  HeapProfile hp;
  __lsan::ForEachChunk(ChunkCallback, &hp);
  hp.Print(reinterpret_cast<uptr>(argument));
}

}  // namespace __asan

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_memory_profile(uptr top_percent) {
  __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
}
}  // extern "C"

#endif  // CAN_SANITIZE_LEAKS
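
The file stays under a hundred lines because the heavy lifting (stack depot, stop-the-world, LSan's chunk iteration) already exists in sanitizer_common. The entry point can be called straight from user code; a usage sketch, with the prototype declared by hand since public headers of this vintage may not expose it:

  #include <cstdlib>

  extern "C" void __sanitizer_print_memory_profile(unsigned long top_percent);

  int main() {
    void *a = malloc(1 << 20);  // two distinct live allocation sites
    void *b = malloc(1 << 10);
    __sanitizer_print_memory_profile(90);  // sites covering top 90% of bytes
    free(a);
    free(b);
  }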
@@ -18,9 +18,25 @@

#include <stddef.h>

// C++ operators can't have visibility attributes on Windows.
// C++ operators can't have dllexport attributes on Windows. We export them
// anyway by passing extra -export flags to the linker, which is exactly what
// dllexport would normally do. We need to export them in order to make the
// VS2015 dynamic CRT (MD) work.
#if SANITIZER_WINDOWS
# define CXX_OPERATOR_ATTRIBUTE
# ifdef _WIN64
#  pragma comment(linker, "/export:??2@YAPEAX_K@Z")   // operator new
#  pragma comment(linker, "/export:??3@YAXPEAX@Z")    // operator delete
#  pragma comment(linker, "/export:??3@YAXPEAX_K@Z")  // sized operator delete
#  pragma comment(linker, "/export:??_U@YAPEAX_K@Z")  // operator new[]
#  pragma comment(linker, "/export:??_V@YAXPEAX@Z")   // operator delete[]
# else
#  pragma comment(linker, "/export:??2@YAPAXI@Z")   // operator new
#  pragma comment(linker, "/export:??3@YAXPAX@Z")   // operator delete
#  pragma comment(linker, "/export:??3@YAXPAXI@Z")  // sized operator delete
#  pragma comment(linker, "/export:??_U@YAPAXI@Z")  // operator new[]
#  pragma comment(linker, "/export:??_V@YAXPAX@Z")  // operator delete[]
# endif
#else
# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
#endif
@@ -37,7 +53,7 @@ using namespace __asan;  // NOLINT
#endif  // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32

// This code has issues on OSX.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// See https://github.com/google/sanitizers/issues/131.

// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
@@ -67,7 +67,7 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
  uptr page_size = GetPageSizeCached();
  uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
  uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
  ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
}

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@@ -100,7 +100,7 @@ using namespace __asan;  // NOLINT
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
@@ -115,9 +115,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
@@ -129,7 +129,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
@@ -155,9 +155,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK(beg.offset < end.offset);
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK(value == end.value);
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not
    // unpoisoned already.
    if (value != 0) {
@@ -165,7 +165,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
    }
    return;
  }
  CHECK(beg.chunk < end.chunk);
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    *beg.chunk = 0;
    beg.chunk++;
@@ -312,6 +312,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  }
}

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
@@ -341,7 +365,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
                 &stack);
  }
  CHECK_LE(end - beg,
           FIRST_32_SECOND_64(1UL << 30, 1UL << 34));  // Sanity check.
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 34));  // Sanity check.

  uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
  uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
@@ -352,7 +376,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //  CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  if (a + granularity <= d1)

@@ -84,7 +84,7 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
  }
}

// Calls __sanitizer::FlushUnneededShadowMemory() on
// Calls __sanitizer::ReleaseMemoryToOS() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
void FlushUnneededASanShadowMemory(uptr p, uptr size);
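
These poisoning entry points back the public manual-poisoning API that custom allocators use to teach ASan about their own free lists and redzones. A short usage example with the documented macros from sanitizer/asan_interface.h:

  #include <sanitizer/asan_interface.h>

  char arena[256];

  void quarantine_second_half() {
    ASAN_POISON_MEMORY_REGION(arena + 128, 128);
    // arena[200] = 1;  // would now be reported as a use-after-poison error
    ASAN_UNPOISON_MEMORY_REGION(arena + 128, 128);  // make it usable again
  }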
@@ -31,17 +31,39 @@

namespace __asan {

const char *DescribeSignalOrException(int signo) {
  switch (signo) {
    case SIGFPE:
      return "FPE";
    case SIGILL:
      return "ILL";
    case SIGABRT:
      return "ABRT";
    default:
      return "SEGV";
  }
}

void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  ScopedDeadlySignal signal_scope(GetCurrentThread());
  int code = (int)((siginfo_t*)siginfo)->si_code;
  // Write the first message using the bullet-proof write.
  if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
  // Write the first message using fd=2, just in case.
  // It may actually fail to write in case stderr is closed.
  internal_write(2, "ASAN:DEADLYSIGNAL\n", 18);
  SignalContext sig = SignalContext::Create(siginfo, context);

  // Access at a reasonable offset above SP, or slightly below it (to account
  // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
  // probably a stack overflow.
#ifdef __s390__
  // On s390, the fault address in siginfo points to start of the page, not
  // to the precise word that was accessed. Mask off the low bits of sp to
  // take it into account.
  bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) &&
                       sig.addr < sig.sp + 0xFFFF;
#else
  bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
#endif

#if __powerpc__
  // Large stack frames can be allocated with e.g.
@@ -73,10 +95,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  // unaligned memory access.
  if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
    ReportStackOverflow(sig);
  else if (signo == SIGFPE)
    ReportDeadlySignal("FPE", sig);
  else
    ReportDeadlySignal("SEGV", sig);
  ReportDeadlySignal(signo, sig);
}
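
The IsStackAccess test encodes the heuristic described in the comment: a fault slightly below SP (redzones, multi-register pushes) or within 64 KiB above it is classified as a stack overflow rather than a generic SEGV. Isolated, the non-s390 predicate is just:

  #include <cstdint>

  // addr is the faulting address, sp the stack pointer from the signal context.
  bool is_probable_stack_overflow(uint64_t addr, uint64_t sp) {
    return addr + 512 > sp &&   // at most 512 bytes below SP
           addr < sp + 0xFFFF;  // within 64 KiB above SP
  }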
// ---------------------- TSD ---------------- {{{1

[The next file's diff was suppressed because it is too large.]
@@ -23,34 +23,28 @@ struct StackVarDescr {
  uptr name_len;
};

struct AddressDescription {
  char *name;
  uptr name_size;
  uptr region_address;
  uptr region_size;
  const char *region_kind;
};

// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
                         int max_globals);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);

const char *MaybeDemangleGlobalName(const char *name);
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);

void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after = "\n");

// The following functions print an address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
                             bool print = true);
bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);

// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
void ReportDeadlySignal(const char *description, const SignalContext &sig);
void ReportDeadlySignal(int signo, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
                                 BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
@@ -75,8 +69,6 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2);

// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
                            BufferedStackTrace *stack);
void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
                               const char *zone_name,
                               BufferedStackTrace *stack);
@@ -30,6 +30,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"

uptr __asan_shadow_memory_dynamic_address;  // Global interface symbol.
int __asan_option_detect_stack_use_after_return;  // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer;  // Used only for testing asan.

@@ -84,8 +85,8 @@ void ShowStatsAndAbort() {
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
  CHECK_EQ((beg % GetPageSizeCached()), 0);
  CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
  CHECK_EQ((beg % GetMmapGranularity()), 0);
  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
  uptr size = end - beg + 1;
  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.
  void *res = MmapFixedNoReserve(beg, size, name);
@@ -261,6 +262,7 @@ static NOINLINE void force_interface_symbols() {
  volatile int fake_condition = 0;  // prevent dead condition elimination.
  // __asan_report_* functions are noreturn, so we need a switch to prevent
  // the compiler from removing any of them.
  // clang-format off
  switch (fake_condition) {
    case 1: __asan_report_load1(0); break;
    case 2: __asan_report_load2(0); break;
@@ -300,7 +302,14 @@ static NOINLINE void force_interface_symbols() {
    case 37: __asan_unpoison_stack_memory(0, 0); break;
    case 38: __asan_region_is_poisoned(0, 0); break;
    case 39: __asan_describe_address(0); break;
    case 40: __asan_set_shadow_00(0, 0); break;
    case 41: __asan_set_shadow_f1(0, 0); break;
    case 42: __asan_set_shadow_f2(0, 0); break;
    case 43: __asan_set_shadow_f3(0, 0); break;
    case 44: __asan_set_shadow_f5(0, 0); break;
    case 45: __asan_set_shadow_f8(0, 0); break;
  }
  // clang-format on
}

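force_interface_symbols' switch over a volatile zero is a linker trick rather than dead code: because the compiler must assume fake_condition could take any value, every case keeps its interface function referenced, so the static archive retains them all. The trick in miniature:

  extern "C" void interface_fn_a();
  extern "C" void interface_fn_b();

  static void force_symbols() {
    volatile int fake_condition = 0;  // value unknowable to the optimizer
    switch (fake_condition) {
      case 1: interface_fn_a(); break;  // stays referenced, never executed
      case 2: interface_fn_b(); break;
    }
  }
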
static void asan_atexit() {
@@ -318,26 +327,39 @@ static void InitializeHighMemEnd() {
  kHighMemEnd = GetMaxVirtualAddress();
  // Increase kHighMemEnd to make sure it's properly
  // aligned together with kHighMemBeg:
  kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1;
  kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
#endif  // !ASAN_FIXED_MAPPING
  CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
  CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}

static void ProtectGap(uptr addr, uptr size) {
  if (!flags()->protect_shadow_gap)
  if (!flags()->protect_shadow_gap) {
    // The shadow gap is unprotected, so there is a chance that someone
    // is actually using this memory. Which means it needs a shadow...
    uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
    uptr GapShadowEnd =
        RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
    if (Verbosity())
      Printf("protect_shadow_gap=0:"
             " not protecting shadow gap, allocating gap's shadow\n"
             "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
             GapShadowEnd);
    ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
                             "unprotected gap shadow");
    return;
    void *res = MmapNoAccess(addr, size, "shadow gap");
  }
  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
  if (addr == (uptr)res)
    return;
  // A few pages at the start of the address space can not be protected.
  // But we really want to protect as much as possible, to prevent this memory
  // being returned as a result of a non-FIXED mmap().
  if (addr == kZeroBaseShadowStart) {
    uptr step = GetPageSizeCached();
    uptr step = GetMmapGranularity();
    while (size > step && addr < kZeroBaseMaxShadowStart) {
      addr += step;
      size -= step;
      void *res = MmapNoAccess(addr, size, "shadow gap");
      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
      if (addr == (uptr)res)
        return;
    }
@@ -413,10 +435,13 @@ static void AsanInitInternal() {

  AsanCheckIncompatibleRT();
  AsanCheckDynamicRTPrereqs();
  AvoidCVE_2016_2143();

  SetCanPoisonMemory(flags()->poison_heap);
  SetMallocContextSize(common_flags()->malloc_context_size);

  InitializePlatformExceptionHandlers();

  InitializeHighMemEnd();

  // Make sure we are not statically linked.
@@ -429,7 +454,6 @@ static void AsanInitInternal() {

  __sanitizer_set_report_path(common_flags()->log_path);

  // Enable UAR detection, if required.
  __asan_option_detect_stack_use_after_return =
      flags()->detect_stack_use_after_return;

@@ -448,7 +472,30 @@ static void AsanInitInternal() {

  ReplaceSystemMalloc();

  // Set the shadow memory address to uninitialized.
  __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;

  uptr shadow_start = kLowShadowBeg;
  // Detect if a dynamic shadow address must be used and find an available
  // location when necessary. When a dynamic address is used, the macro
  // |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|, which
  // is |kDefaultShadowSentinel|.
  if (shadow_start == kDefaultShadowSentinel) {
    __asan_shadow_memory_dynamic_address = 0;
    CHECK_EQ(0, kLowShadowBeg);

    uptr granularity = GetMmapGranularity();
    uptr alignment = 8 * granularity;
    uptr left_padding = granularity;
    uptr space_size = kHighShadowEnd + left_padding;

    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
    CHECK_NE((uptr)0, shadow_start);
    CHECK(IsAligned(shadow_start, alignment));
  }
  // Update the shadow memory address (potentially) used by instrumentation.
  __asan_shadow_memory_dynamic_address = shadow_start;

  if (kLowShadowBeg)
    shadow_start -= GetMmapGranularity();
  bool full_shadow_is_available =
@@ -537,12 +584,12 @@ static void AsanInitInternal() {
  force_interface_symbols();  // no-op.
  SanitizerInitializeUnwinder();

#if CAN_SANITIZE_LEAKS
  __lsan::InitCommonLsan();
  if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
    Atexit(__lsan::DoLeakCheck);
  if (CAN_SANITIZE_LEAKS) {
    __lsan::InitCommonLsan();
    if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
      Atexit(__lsan::DoLeakCheck);
    }
  }
#endif  // CAN_SANITIZE_LEAKS

#if CAN_SANITIZE_UB
  __ubsan::InitAsPlugin();
@@ -550,6 +597,15 @@ static void AsanInitInternal() {

  InitializeSuppressions();

  if (CAN_SANITIZE_LEAKS) {
    // LateInitialize() calls dlsym, which can allocate an error string buffer
|
||||
// in the TLS. Let's ignore the allocation to avoid reporting a leak.
|
||||
__lsan::ScopedInterceptorDisabler disabler;
|
||||
Symbolizer::LateInitialize();
|
||||
} else {
|
||||
Symbolizer::LateInitialize();
|
||||
}
|
||||
|
||||
VReport(1, "AddressSanitizer Init done\n");
|
||||
}
|
||||
|
||||
@ -579,6 +635,9 @@ static AsanInitializer asan_initializer;
|
||||
using namespace __asan; // NOLINT
|
||||
|
||||
void NOINLINE __asan_handle_no_return() {
|
||||
if (asan_init_is_running)
|
||||
return;
|
||||
|
||||
int local_stack;
|
||||
AsanThread *curr_thread = GetCurrentThread();
|
||||
uptr PageSize = GetPageSizeCached();
|
||||
@ -603,7 +662,7 @@ void NOINLINE __asan_handle_no_return() {
|
||||
"stack top: %p; bottom %p; size: %p (%zd)\n"
|
||||
"False positive error reports may follow\n"
|
||||
"For details see "
|
||||
"http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
|
||||
"https://github.com/google/sanitizers/issues/189\n",
|
||||
top, bottom, top - bottom, top - bottom);
|
||||
return;
|
||||
}
|
||||
|
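The dynamic-shadow hunk above reserves kHighShadowEnd + left_padding bytes aligned to 8 * granularity because ASan maps every 8 bytes of application memory to one shadow byte. As a hedged illustration (the >> 3 scale and using the dynamic address as the shadow offset are standard ASan mapping facts, not lines from this merge):

// Sketch of the shadow mapping that the alignment above serves:
// one shadow byte covers SHADOW_GRANULARITY (8) application bytes.
static inline uptr MemToShadow(uptr addr) {
  return (addr >> 3) + __asan_shadow_memory_dynamic_address;
}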
72 libsanitizer/asan/asan_scariness_score.h Normal file
@ -0,0 +1,72 @@
//===-- asan_scariness_score.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Compute the level of scariness of the error message.
// Don't expect any deep science here, just a set of heuristics that suggest
// that e.g. 1-byte-read-global-buffer-overflow is less scary than
// 8-byte-write-stack-use-after-return.
//
// Every error report has one or more features, such as memory access size,
// type (read or write), type of accessed memory (e.g. free-d heap, or a global
// redzone), etc. Every such feature has an int score and a string description.
// The overall score is the sum of all feature scores and the description
// is a concatenation of feature descriptions.
// Examples:
//  17 (4-byte-read-heap-buffer-overflow)
//  65 (multi-byte-write-stack-use-after-return)
//  10 (null-deref)
//
//===----------------------------------------------------------------------===//

#ifndef ASAN_SCARINESS_SCORE_H
#define ASAN_SCARINESS_SCORE_H

#include "asan_flags.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {
struct ScarinessScoreBase {
void Clear() {
descr[0] = 0;
score = 0;
}
void Scare(int add_to_score, const char *reason) {
if (descr[0])
internal_strlcat(descr, "-", sizeof(descr));
internal_strlcat(descr, reason, sizeof(descr));
score += add_to_score;
};
int GetScore() const { return score; }
const char *GetDescription() const { return descr; }
void Print() {
if (score && flags()->print_scariness)
Printf("SCARINESS: %d (%s)\n", score, descr);
}
static void PrintSimple(int score, const char *descr) {
ScarinessScoreBase SSB;
SSB.Clear();
SSB.Scare(score, descr);
SSB.Print();
}

private:
int score;
char descr[1024];
};

struct ScarinessScore : ScarinessScoreBase {
ScarinessScore() {
Clear();
}
};

}  // namespace __asan

#endif  // ASAN_SCARINESS_SCORE_H
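To make the feature-scoring scheme concrete, here is a minimal usage sketch (the scores and feature names are illustrative, not values taken from this merge; the output shape follows Print() above):

ScarinessScore ss;                       // the constructor calls Clear()
ss.Scare(8, "8-byte");                   // access-size feature
ss.Scare(20, "write");                   // access-type feature
ss.Scare(30, "stack-use-after-return");  // memory-kind feature
// With flags()->print_scariness set, Print() emits:
//   SCARINESS: 58 (8-byte-write-stack-use-after-return)
ss.Print();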
@ -46,7 +46,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom,
fast);
}
} else if (!t && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
@ -87,6 +87,7 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {

if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
if (!function_name) {
@ -118,6 +118,77 @@ void AsanThread::Destroy() {
DTLS_Destroy();
}

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
uptr size) {
if (atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: starting fiber switch while in fiber switch\n");
Die();
}

next_stack_bottom_ = bottom;
next_stack_top_ = bottom + size;
atomic_store(&stack_switching_, 1, memory_order_release);

FakeStack *current_fake_stack = fake_stack_;
if (fake_stack_save)
*fake_stack_save = fake_stack_;
fake_stack_ = nullptr;
SetTLSFakeStack(nullptr);
// If fake_stack_save is null, the fiber will die; delete the fake stack.
if (!fake_stack_save && current_fake_stack)
current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
uptr *bottom_old,
uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
Die();
}

if (fake_stack_save) {
SetTLSFakeStack(fake_stack_save);
fake_stack_ = fake_stack_save;
}

if (bottom_old)
*bottom_old = stack_bottom_;
if (size_old)
*size_old = stack_top_ - stack_bottom_;
stack_bottom_ = next_stack_bottom_;
stack_top_ = next_stack_top_;
atomic_store(&stack_switching_, 0, memory_order_release);
next_stack_top_ = 0;
next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
if (!atomic_load(&stack_switching_, memory_order_acquire))
return StackBounds{stack_bottom_, stack_top_};  // NOLINT
char local;
const uptr cur_stack = (uptr)&local;
// Note: need to check next stack first, because FinishSwitchFiber
// may be in process of overwriting stack_top_/bottom_. But in such case
// we are already on the next stack.
if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
return StackBounds{next_stack_bottom_, next_stack_top_};  // NOLINT
return StackBounds{stack_bottom_, stack_top_};  // NOLINT
}

uptr AsanThread::stack_top() {
return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
const auto bounds = GetStackBounds();
return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {

@ -148,6 +219,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
}

void AsanThread::Init() {
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
fake_stack_ = nullptr;  // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();

@ -193,10 +266,12 @@ thread_return_t AsanThread::ThreadStart(

void AsanThread::SetThreadStackAndTls() {
uptr tls_size = 0;
GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_,
&tls_size);
stack_top_ = stack_bottom_ + stack_size_;
uptr stack_size = 0;
GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();

int local;
CHECK(AddrIsInStack((uptr)&local));

@ -247,6 +322,11 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}

bool AsanThread::AddrIsInStack(uptr addr) {
const auto bounds = GetStackBounds();
return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
void *addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);

@ -269,7 +349,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
if (ThreadStackContainsAddress(tctx, &context)) {
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}

@ -320,8 +400,8 @@ __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t) return false;
*stack_begin = t->stack_bottom();

@ -331,6 +411,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
// ASan doesn't keep allocator caches in TLS, so these are unused.
*cache_begin = 0;
*cache_end = 0;
*dtls = t->dtls();
return true;
}

@ -353,3 +434,33 @@ void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
uptr size) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
return;
}
t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
const void **bottom_old,
uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
t->FinishSwitchFiber((FakeStack*)fakestack,
(uptr*)bottom_old,
(uptr*)size_old);
}
}
@ -21,6 +21,10 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h"

namespace __sanitizer {
struct DTLS;
}  // namespace __sanitizer

namespace __asan {

const u32 kInvalidTid = 0xffffff;  // Must fit into 24 bits.

@ -60,11 +64,12 @@ class AsanThread {
thread_return_t ThreadStart(uptr os_id,
atomic_uintptr_t *signal_thread_is_registered);

uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
uptr stack_size() { return stack_size_; }
uptr stack_top();
uptr stack_bottom();
uptr stack_size();
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
DTLS *dtls() { return dtls_; }
u32 tid() { return context_->tid; }
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }

@ -76,9 +81,7 @@ class AsanThread {
};
bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);

bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
bool AddrIsInStack(uptr addr);

void DeleteFakeStack(int tid) {
if (!fake_stack_) return;

@ -88,13 +91,20 @@ class AsanThread {
t->Destroy(tid);
}

void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old);

bool has_fake_stack() {
return (reinterpret_cast<uptr>(fake_stack_) > 1);
return !atomic_load(&stack_switching_, memory_order_relaxed) &&
(reinterpret_cast<uptr>(fake_stack_) > 1);
}

FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
if (atomic_load(&stack_switching_, memory_order_relaxed))
return nullptr;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;

@ -120,16 +130,27 @@ class AsanThread {
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();

struct StackBounds {
uptr bottom;
uptr top;
};
StackBounds GetStackBounds() const;

AsanThreadContext *context_;
thread_callback_t start_routine_;
void *arg_;

uptr stack_top_;
uptr stack_bottom_;
// stack_size_ == stack_top_ - stack_bottom_;
// It needs to be set in an async-signal-safe manner.
uptr stack_size_;
// These variables are used when the thread is about to switch stacks.
uptr next_stack_top_;
uptr next_stack_bottom_;
// True if a stack switch is in progress.
atomic_uint8_t stack_switching_;

uptr tls_begin_;
uptr tls_end_;
DTLS *dtls_;

FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
@ -22,6 +22,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"

@ -34,7 +35,13 @@ int __asan_should_detect_stack_use_after_return() {
return __asan_option_detect_stack_use_after_return;
}

// -------------------- A workaround for the abscence of weak symbols ----- {{{
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_shadow_memory_dynamic_address() {
__asan_init();
return __asan_shadow_memory_dynamic_address;
}

// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and

@ -44,21 +51,49 @@ void __sanitizer_default_free_hook(void *ptr) { }
const char* __asan_default_default_options() { return ""; }
const char* __asan_default_default_suppressions() { return ""; }
void __asan_default_on_error() {}
// 64-bit msvc will not prepend an underscore for symbols.
#ifdef _WIN64
#pragma comment(linker, "/alternatename:__sanitizer_malloc_hook=__sanitizer_default_malloc_hook")  // NOLINT
#pragma comment(linker, "/alternatename:__sanitizer_free_hook=__sanitizer_default_free_hook")  // NOLINT
#pragma comment(linker, "/alternatename:__asan_default_options=__asan_default_default_options")  // NOLINT
#pragma comment(linker, "/alternatename:__asan_default_suppressions=__asan_default_default_suppressions")  // NOLINT
#pragma comment(linker, "/alternatename:__asan_on_error=__asan_default_on_error")  // NOLINT
#else
#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook")  // NOLINT
#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook")  // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options")  // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions")  // NOLINT
#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error")  // NOLINT
#endif
// }}}
}  // extern "C"

// ---------------------- Windows-specific inteceptors ---------------- {{{
// ---------------------- Windows-specific interceptors ---------------- {{{
INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
CHECK(REAL(RtlRaiseException));
// This is a noreturn function, unless it's one of the exceptions raised to
// communicate with the debugger, such as the one from OutputDebugString.
if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
__asan_handle_no_return();
REAL(RtlRaiseException)(ExceptionRecord);
}

INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}

#ifdef _WIN64

INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) {  // NOLINT
CHECK(REAL(__C_specific_handler));
__asan_handle_no_return();
return REAL(__C_specific_handler)(a, b, c, d);
}

#else

INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler3));
__asan_handle_no_return();

@ -74,6 +109,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
#endif

static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;

@ -99,52 +135,33 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
asan_thread_start, t, thr_flags, tid);
}

namespace {
BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);

void EnsureWorkerThreadRegistered() {
// FIXME: GetCurrentThread relies on TSD, which might not play well with
// system thread pools. We might want to use something like reference
// counting to zero out GetCurrentThread() underlying storage when the last
// work item finishes? Or can we disable reclaiming of threads in the pool?
BlockingMutexLock l(&mu_for_thread_tracking);
if (__asan::GetCurrentThread())
return;

AsanThread *t = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr,
/* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
}  // namespace

INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
// NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
// query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
// System worker pool threads are created at an arbitrary point in time and
// without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
// instead and don't register a specific parent_tid/stack.
EnsureWorkerThreadRegistered();
return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
}

// }}}

namespace __asan {

void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(RaiseException);

#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
#else
ASAN_INTERCEPT_FUNC(_except_handler3);
ASAN_INTERCEPT_FUNC(_except_handler4);
#endif

// NtWaitForWorkViaWorkerFactory is always linked dynamically.
CHECK(::__interception::OverrideFunction(
"NtWaitForWorkViaWorkerFactory",
(uptr)WRAP(NtWaitForWorkViaWorkerFactory),
(uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
// Try to intercept kernel32!RaiseException, and if that fails, intercept
// ntdll!RtlRaiseException instead.
if (!::__interception::OverrideFunction("RaiseException",
(uptr)WRAP(RaiseException),
(uptr *)&REAL(RaiseException))) {
CHECK(::__interception::OverrideFunction("RtlRaiseException",
(uptr)WRAP(RtlRaiseException),
(uptr *)&REAL(RtlRaiseException)));
}
}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}

// ---------------------- TSD ---------------- {{{

@ -173,14 +190,6 @@ void PlatformTSDDtor(void *tsd) {
// }}}

// ---------------------- Various stuff ---------------- {{{
void DisableReexec() {
// No need to re-exec on Windows.
}

void MaybeReexec() {
// No need to re-exec on Windows.
}

void *AsanDoesNotSupportStaticLinkage() {
#if defined(_DEBUG)
#error Please build the runtime with a non-debug CRT: /MD or /MT

@ -200,20 +209,95 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
UNIMPLEMENTED();
}

#if SANITIZER_WINDOWS64
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
uptr page_size = GetPageSizeCached();
// Only handle access violations.
if (exception_pointers->ExceptionRecord->ExceptionCode !=
EXCEPTION_ACCESS_VIOLATION) {
return EXCEPTION_CONTINUE_SEARCH;
}

// Only handle access violations that land within the shadow memory.
uptr addr =
(uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]);

// Check valid shadow range.
if (!AddrIsInShadow(addr)) return EXCEPTION_CONTINUE_SEARCH;

// This is an access violation while trying to read from the shadow. Commit
// the relevant page and let execution continue.

// Determine the address of the page that is being accessed.
uptr page = RoundDownTo(addr, page_size);

// Query the existing page.
MEMORY_BASIC_INFORMATION mem_info = {};
if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0)
return EXCEPTION_CONTINUE_SEARCH;

// Commit the page.
uptr result =
(uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
if (result != page) return EXCEPTION_CONTINUE_SEARCH;

// The page mapping succeeded, so continue execution as usual.
return EXCEPTION_CONTINUE_EXECUTION;
}

#endif

void InitializePlatformExceptionHandlers() {
#if SANITIZER_WINDOWS64
// On Win64, we map memory on demand with access violation handler.
// Install our exception handler.
CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler));
#endif
}

static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;

// Check based on flags if we should report this exception.
static bool ShouldReportDeadlyException(unsigned code) {
switch (code) {
case EXCEPTION_ACCESS_VIOLATION:
case EXCEPTION_IN_PAGE_ERROR:
return common_flags()->handle_segv;
case EXCEPTION_BREAKPOINT:
case EXCEPTION_ILLEGAL_INSTRUCTION: {
return common_flags()->handle_sigill;
}
}
return false;
}

// Return the textual name for this exception.
const char *DescribeSignalOrException(int signo) {
unsigned code = signo;
// Get the string description of the exception if this is a known deadly
// exception.
switch (code) {
case EXCEPTION_ACCESS_VIOLATION:
return "access-violation";
case EXCEPTION_IN_PAGE_ERROR:
return "in-page-error";
case EXCEPTION_BREAKPOINT:
return "breakpoint";
case EXCEPTION_ILLEGAL_INSTRUCTION:
return "illegal-instruction";
}
return nullptr;
}

static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;

if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
const char *description =
(exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
? "access-violation"
: "in-page-error";
if (ShouldReportDeadlyException(exception_record->ExceptionCode)) {
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(description, sig);
ReportDeadlySignal(exception_record->ExceptionCode, sig);
}

// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.

@ -248,10 +332,16 @@ int __asan_set_seh_filter() {
}

#if !ASAN_DYNAMIC
// Put a pointer to __asan_set_seh_filter at the end of the global list
// of C initializers, after the default EH is set by the CRT.
#pragma section(".CRT$XIZ", long, read)  // NOLINT
__declspec(allocate(".CRT$XIZ"))
// The CRT runs initializers in this order:
// - C initializers, from XIA to XIZ
// - C++ initializers, from XCA to XCZ
// Prior to 2015, the CRT set the unhandled exception filter at priority XIY,
// near the end of C initialization. Starting in 2015, it was moved to the
// beginning of C++ initialization. We set our priority to XCAB to run
// immediately after the CRT runs. This way, our exception filter is called
// first and we can delegate to their filter if appropriate.
#pragma section(".CRT$XCAB", long, read)  // NOLINT
__declspec(allocate(".CRT$XCAB"))
int (*__intercept_seh)() = __asan_set_seh_filter;
#endif
// }}}
@ -10,16 +10,16 @@
// This file defines a family of thunks that should be statically linked into
// the DLLs that have ASan instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
// details.
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//

// Only compile this code when buidling asan_dll_thunk.lib
// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"

// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {

@ -28,6 +28,8 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort();
}

using namespace __sanitizer;

static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);

@ -196,9 +198,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
WRAP_W_V(__asan_get_shadow_memory_dynamic_address)

extern "C" {
int __asan_option_detect_stack_use_after_return;
uptr __asan_shadow_memory_dynamic_address;

// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.

@ -212,7 +216,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);

__asan_shadow_memory_dynamic_address =
(uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}

@ -255,6 +260,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);

INTERFACE_FUNCTION(__asan_set_shadow_00);
INTERFACE_FUNCTION(__asan_set_shadow_f1);
INTERFACE_FUNCTION(__asan_set_shadow_f2);
INTERFACE_FUNCTION(__asan_set_shadow_f3);
INTERFACE_FUNCTION(__asan_set_shadow_f5);
INTERFACE_FUNCTION(__asan_set_shadow_f8);

INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);

@ -309,8 +321,6 @@ INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)

@ -324,6 +334,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)

@ -333,6 +345,7 @@ INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)

@ -340,23 +353,31 @@ INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)

// TODO(timurrrr): Add more interface functions on an as-needed basis.

// ----------------- Memory allocation functions ---------------------
WRAP_V_W(free)
WRAP_V_W(_free_base)
WRAP_V_WW(_free_dbg)

WRAP_W_W(malloc)
WRAP_W_W(_malloc_base)
WRAP_W_WWWW(_malloc_dbg)

WRAP_W_WW(calloc)
WRAP_W_WW(_calloc_base)
WRAP_W_WWWWW(_calloc_dbg)
WRAP_W_WWW(_calloc_impl)

WRAP_W_WW(realloc)
WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_WWW(_recalloc_base)

WRAP_W_W(_msize)
WRAP_W_W(_expand)

@ -369,6 +390,10 @@ WRAP_W_W(_expand_dbg)

INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);

#ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
#else
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);

// _except_handler4 checks -GS cookie which is different for each module, so we

@ -377,10 +402,13 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
#endif

INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
INTERCEPT_LIBRARY_FUNCTION(memchr);
#endif
INTERCEPT_LIBRARY_FUNCTION(memcmp);
INTERCEPT_LIBRARY_FUNCTION(memcpy);
INTERCEPT_LIBRARY_FUNCTION(memmove);

@ -390,12 +418,14 @@ INTERCEPT_LIBRARY_FUNCTION(strchr);
INTERCEPT_LIBRARY_FUNCTION(strcmp);
INTERCEPT_LIBRARY_FUNCTION(strcpy);  // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strcspn);
INTERCEPT_LIBRARY_FUNCTION(strdup);
INTERCEPT_LIBRARY_FUNCTION(strlen);
INTERCEPT_LIBRARY_FUNCTION(strncat);
INTERCEPT_LIBRARY_FUNCTION(strncmp);
INTERCEPT_LIBRARY_FUNCTION(strncpy);
INTERCEPT_LIBRARY_FUNCTION(strnlen);
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
INTERCEPT_LIBRARY_FUNCTION(strrchr);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtol);

@ -405,7 +435,9 @@ INTERCEPT_LIBRARY_FUNCTION(wcslen);
// is defined.
void InterceptHooks() {
INTERCEPT_HOOKS();
#ifndef _WIN64
INTERCEPT_FUNCTION(_except_handler4);
#endif
}

// We want to call __asan_init before C/C++ initializers/constructors are
@ -1,4 +1,4 @@
//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.

@ -14,11 +14,11 @@
// This includes:
//  - forwarding the detect_stack_use_after_return runtime option
//  - working around deficiencies of the MD runtime
//  - installing a custom SEH handlerx
//  - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//

// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK

@ -27,7 +27,7 @@

// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XID", long, read)  // NOLINT
#pragma section(".CRT$XIZ", long, read)  // NOLINT
#pragma section(".CRT$XCAB", long, read)  // NOLINT
#pragma section(".CRT$XTW", long, read)  // NOLINT
#pragma section(".CRT$XTY", long, read)  // NOLINT

@ -40,12 +40,16 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the a variable that is
// constant after initialization anyways.
// just to work around this issue, let's clone the variable that is constant
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();

__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
void* __asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
}

////////////////////////////////////////////////////////////////////////////////

@ -57,6 +61,7 @@ int __asan_option_detect_stack_use_after_return =
// using atexit() that calls a small subset of C terminators
// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
// are there.
extern "C" int __cdecl atexit(void (__cdecl *f)(void));
extern "C" void __cdecl _initterm(void *a, void *b);

namespace {

@ -70,6 +75,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
}  // namespace

// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++

@ -78,8 +84,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;

}  // namespace

////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization

@ -90,7 +94,8 @@ static int SetSEHFilter() { return __asan_set_seh_filter(); }

// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
    SetSEHFilter;
}

#endif // ASAN_DYNAMIC_RUNTIME_THUNK
@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
3:0:0
4:0:0
169 libsanitizer/builtins/assembly.h Normal file
@ -0,0 +1,169 @@
/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file defines macros for use in compiler-rt assembler source.
 * This file is not part of the interface of this library.
 *
 * ===----------------------------------------------------------------------===
 */

#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H

#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
#define SEPARATOR @
#else
#define SEPARATOR ;
#endif

#if defined(__APPLE__)
#define HIDDEN(name) .private_extern name
#define LOCAL_LABEL(name) L_##name
// tell linker it can break up file at label boundaries
#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
#define SYMBOL_IS_FUNC(name)
#define CONST_SECTION .const

#define NO_EXEC_STACK_DIRECTIVE

#elif defined(__ELF__)

#define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE
#if defined(__arm__)
#define SYMBOL_IS_FUNC(name) .type name,%function
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
#define CONST_SECTION .section .rodata

#if defined(__GNU__) || defined(__ANDROID__) || defined(__FreeBSD__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif

#else // !__APPLE__ && !__ELF__

#define HIDDEN(name)
#define LOCAL_LABEL(name) .L ## name
#define FILE_LEVEL_DIRECTIVE
#define SYMBOL_IS_FUNC(name) \
.def name SEPARATOR \
.scl 2 SEPARATOR \
.type 32 SEPARATOR \
.endef
#define CONST_SECTION .section .rdata,"rd"

#define NO_EXEC_STACK_DIRECTIVE

#endif

#if defined(__arm__)
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif

#ifdef ARM_HAS_BX
#define JMP(r) bx r
#define JMPc(r, c) bx##c r
#else
#define JMP(r) mov pc, r
#define JMPc(r, c) mov##c pc, r
#endif

// pop {pc} can't switch Thumb mode on ARMv4T
#if __ARM_ARCH >= 5
#define POP_PC() pop {pc}
#else
#define POP_PC() \
pop {ip}; \
JMP(ip)
#endif

#if __ARM_ARCH_ISA_THUMB == 2
#define IT(cond) it cond
#define ITT(cond) itt cond
#else
#define IT(cond)
#define ITT(cond)
#endif

#if __ARM_ARCH_ISA_THUMB == 2
#define WIDE(op) op.w
#else
#define WIDE(op) op
#endif
#endif

#define GLUE2(a, b) a##b
#define GLUE(a, b) GLUE2(a, b)
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)

#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR
#else
#define DECLARE_SYMBOL_VISIBILITY(name)
#endif

#define DEFINE_COMPILERRT_FUNCTION(name) \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
SYMBOL_NAME(name):

#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
.thumb_func SEPARATOR \
SYMBOL_NAME(name):

#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_NAME(name):

#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
HIDDEN(name) SEPARATOR \
name:

#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR

#if defined(__ARM_EABI__)
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
#else
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
#endif

#ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else
#define END_COMPILERRT_FUNCTION(name)
#endif

#endif /* COMPILERRT_ASSEMBLY_H */
6 libsanitizer/configure vendored
@ -604,6 +604,7 @@ ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
LIBOBJS
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS
TSAN_TARGET_DEPENDENT_OBJECTS
LIBBACKTRACE_SUPPORTED_FALSE
LIBBACKTRACE_SUPPORTED_TRUE

@ -12027,7 +12028,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
#line 12030 "configure"
#line 12031 "configure"
#include "confdefs.h"

#if HAVE_DLFCN_H

@ -12133,7 +12134,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
#line 12136 "configure"
#line 12137 "configure"
#include "confdefs.h"

#if HAVE_DLFCN_H

@ -16498,6 +16499,7 @@ fi




cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
@ -375,5 +375,6 @@ _EOF
fi

AC_SUBST([TSAN_TARGET_DEPENDENT_OBJECTS])
AC_SUBST([SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS])

AC_OUTPUT
@ -20,12 +20,14 @@

# Filter out unsupported systems.
TSAN_TARGET_DEPENDENT_OBJECTS=
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=
case "${target}" in
x86_64-*-linux* | i?86-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
TSAN_SUPPORTED=yes
LSAN_SUPPORTED=yes
TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_amd64.lo
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=sanitizer_linux_x86_64.lo
fi
;;
powerpc*-*-linux*)
@ -57,6 +57,23 @@ extern "C" {
deallocation of "ptr". */
void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
void __sanitizer_free_hook(const volatile void *ptr);

/* Installs a pair of hooks for malloc/free.
Several (currently, 5) hook pairs may be installed, they are executed
in the order they were installed and after calling
__sanitizer_malloc_hook/__sanitizer_free_hook.
Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
chained and do not rely on weak symbols working on the platform, but
require __sanitizer_install_malloc_and_free_hooks to be called at startup
and thus will not be called on malloc/free very early in the process.
Returns the number of hooks currently installed or 0 on failure.
Not thread-safe, should be called in the main thread before starting
other threads.
*/
int __sanitizer_install_malloc_and_free_hooks(
void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *));

#ifdef __cplusplus
}  // extern "C"
#endif
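A minimal usage sketch for the hook installer above (the hook bodies and names are illustrative; only the __sanitizer_install_malloc_and_free_hooks signature comes from this header, and real hooks should avoid allocating):

#include <sanitizer/allocator_interface.h>
#include <cstdio>

// Hypothetical hooks: log every heap allocation and deallocation.
static void MyMallocHook(const volatile void *ptr, size_t size) {
  std::fprintf(stderr, "malloc(%zu) = %p\n", size, (const void *)ptr);
}
static void MyFreeHook(const volatile void *ptr) {
  std::fprintf(stderr, "free(%p)\n", (const void *)ptr);
}

int main() {
  // Install before spawning threads; the call returns 0 on failure.
  if (__sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook) == 0)
    std::fprintf(stderr, "could not install hooks\n");
  // ... rest of the program ...
}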
@ -39,6 +39,9 @@ extern "C" {

// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
void __sanitizer_set_report_fd(void *fd);

// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
@ -112,6 +115,16 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();

// Symbolizes the supplied 'pc' using the format string 'fmt'.
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
// The format syntax is described in
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
size_t out_buf_size);
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
char *out_buf, size_t out_buf_size);

// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
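A short sketch of calling the symbolizer declared above. The "%p %F %L" directives follow sanitizer_stacktrace_printer.h (PC, function, file:line); treat the exact format string as an assumption here:

#include <sanitizer/common_interface_defs.h>
#include <cstdio>

void WhereAmI() {
  char buf[256];
  // Symbolize this function's call site: PC, function name, location.
  __sanitizer_symbolize_pc(__builtin_return_address(0), "%p %F %L", buf,
                           sizeof(buf));
  std::printf("called from: %s\n", buf);
}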
@ -123,9 +136,50 @@ extern "C" {
// to know what is being passed to libc functions, e.g. memcmp.
// FIXME: implement more hooks.
void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
const void *s2, size_t n);
const void *s2, size_t n, int result);
void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
const char *s2, size_t n);
const char *s2, size_t n, int result);
void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
const char *s2, size_t n, int result);
void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
const char *s2, int result);
void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
const char *s2, int result);
void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
const char *s2, char *result);
void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
const char *s2, char *result);
void __sanitizer_weak_hook_memmem(void *called_pc,
const void *s1, size_t len1,
const void *s2, size_t len2, void *result);

// Prints stack traces for all live heap allocations ordered by total
// allocation size until `top_percent` of total live heap is shown.
// `top_percent` should be between 1 and 100.
// Experimental feature currently available only with asan on Linux/x86_64.
void __sanitizer_print_memory_profile(size_t top_percent);

// Fiber annotation interface.
// Before switching to a different stack, one must call
// __sanitizer_start_switch_fiber with a pointer to the bottom of the
// destination stack and its size. When code starts running on the new stack,
// it must call __sanitizer_finish_switch_fiber to finalize the switch.
// The start_switch function takes a void** to store the current fake stack if
// there is one (it is needed when detect_stack_use_after_return is enabled).
// When restoring a stack, this pointer must be given to the finish_switch
// function. In most cases, this void* can be stored on the stack just before
// switching. When leaving a fiber for good, null must be passed as the first
// argument to the start_switch function so that the fake stack is destroyed.
// If you do not want support for stack use-after-return detection, you can
// always pass null to these two functions.
// Note that the fake stack mechanism is disabled during fiber switches, so if
// a signal callback runs during a switch, it will not benefit from stack
// use-after-return detection.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
const void *bottom, size_t size);
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);
#ifdef __cplusplus
}  // extern "C"
#endif
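The fiber protocol above is easier to see in a worked sketch. This is a hedged example, not code from the merge: it assumes a POSIX ucontext fiber and elides error handling.

#include <sanitizer/common_interface_defs.h>
#include <ucontext.h>

static ucontext_t main_ctx, fiber_ctx;
static char fiber_stack[64 * 1024];

static void FiberMain() {
  // Now running on fiber_stack: finalize the switch. Nothing to restore
  // here, so pass null plus out-params that receive the old stack bounds.
  const void *old_bottom; size_t old_size;
  __sanitizer_finish_switch_fiber(nullptr, &old_bottom, &old_size);
  // ... fiber body ...
  // Leaving the fiber for good: pass null so its fake stack is destroyed.
  __sanitizer_start_switch_fiber(nullptr, old_bottom, old_size);
  swapcontext(&fiber_ctx, &main_ctx);
}

int main() {
  getcontext(&fiber_ctx);
  fiber_ctx.uc_stack.ss_sp = fiber_stack;
  fiber_ctx.uc_stack.ss_size = sizeof(fiber_stack);
  fiber_ctx.uc_link = &main_ctx;
  makecontext(&fiber_ctx, FiberMain, 0);

  void *fake_stack_save = nullptr;
  // Announce the destination stack, saving our fake stack if there is one.
  __sanitizer_start_switch_fiber(&fake_stack_save, fiber_stack,
                                 sizeof(fiber_stack));
  swapcontext(&main_ctx, &fiber_ctx);
  // Back on the original stack: restore the saved fake stack.
  __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);
}

Passing null for bottom_old/size_old is safe here: FinishSwitchFiber in asan_thread.cc above checks both pointers before writing through them.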
@ -56,6 +56,7 @@ extern "C" {
// __sanitizer_get_number_of_counters bytes long and 8-aligned.
uintptr_t
__sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);

#ifdef __cplusplus
}  // extern "C"
#endif
48 libsanitizer/include/sanitizer/esan_interface.h Normal file
@ -0,0 +1,48 @@
//===-- sanitizer/esan_interface.h ------------------------------*- C++ -*-===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of EfficiencySanitizer, a family of performance tuners.
|
||||
//
|
||||
// Public interface header.
|
||||
//===----------------------------------------------------------------------===//
|
||||
#ifndef SANITIZER_ESAN_INTERFACE_H
|
||||
#define SANITIZER_ESAN_INTERFACE_H
|
||||
|
||||
#include <sanitizer/common_interface_defs.h>
|
||||
|
||||
// We declare our interface routines as weak to allow the user to avoid
|
||||
// ifdefs and instead use this pattern to allow building the same sources
|
||||
// with and without our runtime library:
|
||||
// if (__esan_report)
|
||||
// __esan_report();
|
||||
#ifdef _MSC_VER
|
||||
/* selectany is as close to weak as we'll get. */
|
||||
#define COMPILER_RT_WEAK __declspec(selectany)
|
||||
#elif __GNUC__
|
||||
#define COMPILER_RT_WEAK __attribute__((weak))
|
||||
#else
|
||||
#define COMPILER_RT_WEAK
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// This function can be called mid-run (or at the end of a run for
|
||||
// a server process that doesn't shut down normally) to request that
|
||||
// data for that point in the run be reported from the tool.
|
||||
void COMPILER_RT_WEAK __esan_report();
|
||||
|
||||
// This function returns the number of samples that the esan tool has collected
|
||||
// to this point. This is useful for testing.
|
||||
unsigned int COMPILER_RT_WEAK __esan_get_sample_count();
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_ESAN_INTERFACE_H
|
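
Note: a hedged sketch of the weak-symbol pattern described in the header above (not part of the patch); the same source builds and runs whether or not the esan runtime is linked in:

#include <sanitizer/esan_interface.h>

void MaybeReportEsanStats() {
  // Weak declaration: the function address is null when the runtime is absent.
  if (__esan_report)
    __esan_report();
}
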
@ -1833,6 +1833,17 @@
__sanitizer_syscall_pre_impl_vfork()
#define __sanitizer_syscall_post_vfork(res) \
__sanitizer_syscall_post_impl_vfork(res)
#define __sanitizer_syscall_pre_sigaction(signum, act, oldact) \
__sanitizer_syscall_pre_impl_sigaction((long)signum, (long)act, (long)oldact)
#define __sanitizer_syscall_post_sigaction(res, signum, act, oldact) \
__sanitizer_syscall_post_impl_sigaction(res, (long)signum, (long)act, \
(long)oldact)
#define __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, sz) \
__sanitizer_syscall_pre_impl_rt_sigaction((long)signum, (long)act, \
(long)oldact, (long)sz)
#define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) \
__sanitizer_syscall_post_impl_rt_sigaction(res, (long)signum, (long)act, \
(long)oldact, (long)sz)

// And now a few syscalls we don't handle yet.
#define __sanitizer_syscall_pre_afs_syscall(...)
@ -1887,7 +1898,6 @@
#define __sanitizer_syscall_pre_query_module(...)
#define __sanitizer_syscall_pre_readahead(...)
#define __sanitizer_syscall_pre_readdir(...)
#define __sanitizer_syscall_pre_rt_sigaction(...)
#define __sanitizer_syscall_pre_rt_sigreturn(...)
#define __sanitizer_syscall_pre_rt_sigsuspend(...)
#define __sanitizer_syscall_pre_security(...)
@ -1901,7 +1911,6 @@
#define __sanitizer_syscall_pre_setreuid32(...)
#define __sanitizer_syscall_pre_set_thread_area(...)
#define __sanitizer_syscall_pre_setuid32(...)
#define __sanitizer_syscall_pre_sigaction(...)
#define __sanitizer_syscall_pre_sigaltstack(...)
#define __sanitizer_syscall_pre_sigreturn(...)
#define __sanitizer_syscall_pre_sigsuspend(...)
@ -1969,7 +1978,6 @@
#define __sanitizer_syscall_post_query_module(res, ...)
#define __sanitizer_syscall_post_readahead(res, ...)
#define __sanitizer_syscall_post_readdir(res, ...)
#define __sanitizer_syscall_post_rt_sigaction(res, ...)
#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
#define __sanitizer_syscall_post_security(res, ...)
@ -1983,7 +1991,6 @@
#define __sanitizer_syscall_post_setreuid32(res, ...)
#define __sanitizer_syscall_post_set_thread_area(res, ...)
#define __sanitizer_syscall_post_setuid32(res, ...)
#define __sanitizer_syscall_post_sigaction(res, ...)
#define __sanitizer_syscall_post_sigaltstack(res, ...)
#define __sanitizer_syscall_post_sigreturn(res, ...)
#define __sanitizer_syscall_post_sigsuspend(res, ...)
@ -3060,7 +3067,13 @@ void __sanitizer_syscall_pre_impl_fork();
void __sanitizer_syscall_post_impl_fork(long res);
void __sanitizer_syscall_pre_impl_vfork();
void __sanitizer_syscall_post_impl_vfork(long res);

void __sanitizer_syscall_pre_impl_sigaction(long signum, long act, long oldact);
void __sanitizer_syscall_post_impl_sigaction(long res, long signum, long act,
long oldact);
void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act,
long oldact, long sz);
void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
long oldact, long sz);
#ifdef __cplusplus
} // extern "C"
#endif

@ -169,6 +169,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@

@ -90,8 +90,8 @@ typedef __sanitizer::OFF64_T OFF64_T;

// Just a pair of pointers.
struct interpose_substitution {
const uptr replacement;
const uptr original;
const __sanitizer::uptr replacement;
const __sanitizer::uptr original;
};

// For a function foo() create a global pair of pointers { wrap_foo, foo } in
@ -156,10 +156,12 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
#endif // __APPLE__

#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
[File diff suppressed because it is too large]
@ -32,6 +32,31 @@ bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);

// Overrides a function only when it is called from a specific DLL. For example,
// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
// affecting other third party libraries.
bool OverrideImportedFunction(const char *module_to_patch,
const char *imported_module,
const char *function_name, uptr new_function,
uptr *orig_old_func);

#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(
uptr old_func, uptr new_func, uptr *orig_old_func);
#endif

// Exposed for unittests
bool OverrideFunctionWithRedirectJump(
uptr old_func, uptr new_func, uptr *orig_old_func);
bool OverrideFunctionWithHotPatch(
uptr old_func, uptr new_func, uptr *orig_old_func);
bool OverrideFunctionWithTrampoline(
uptr old_func, uptr new_func, uptr *orig_old_func);

// Exposed for unittests
void TestOnlyReleaseTrampolineRegions();

} // namespace __interception

#if defined(INTERCEPTION_DYNAMIC_CRT)
@ -48,5 +73,10 @@ uptr InternalGetProcAddress(void *module, const char *func_name);

#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)

#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
::__interception::OverrideImportedFunction( \
user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
(::__interception::uptr *)&REAL(func))

#endif // INTERCEPTION_WIN_H
#endif // _WIN32

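Note: a hedged sketch of how a runtime might use the new macro (not from this patch; WRAP/REAL come from the interception framework, and the DLL names follow the OverrideImportedFunction comment above):

// Redirect ucrtbase.dll's import of kernel32!HeapAlloc to the sanitizer
// wrapper, leaving other modules' HeapAlloc calls untouched.
INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", "kernel32.dll", HeapAlloc);
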
@ -211,6 +211,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@

@ -210,6 +210,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@

@ -41,6 +41,7 @@ static void InitializeFlags() {
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 30;
cf.intercept_tls_get_addr = true;
cf.detect_leaks = true;
cf.exitcode = 23;
OverrideCommonFlags(cf);
@ -69,6 +70,7 @@ extern "C" void __lsan_init() {
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
CacheBinaryName();
AvoidCVE_2016_2143();
InitializeFlags();
InitCommonLsan();
InitializeAllocator();

@ -22,8 +22,11 @@
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \
if (!SANITIZER_MIPS || \
IsValidFrame(GET_CURRENT_FRAME(), stack_top, stack_bottom)) { \
stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
/* context */ 0, stack_top, stack_bottom, fast); \
} \
}

#define GET_STACK_TRACE_FATAL \

@ -41,10 +41,17 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;

struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x600000000000ULL;
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
@ -97,11 +104,13 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
memset(p, 0, size);
RegisterAllocation(stack, p, size);
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
RunMallocHooks(p, size);
return p;
}

void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}

@ -21,6 +21,7 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {
@ -29,8 +30,17 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
if (!disable_counter && common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
disable_counter--;
}

Flags lsan_flags;

@ -183,9 +193,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
&cache_begin, &cache_end);
&cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
@ -209,9 +220,18 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
// signal handler on alternate stack, or swapcontext was used).
// Again, consider the entire stack range to be reachable.
LOG_THREADS("WARNING: stack pointer not in stack range.\n");
uptr page_size = GetPageSizeCached();
int skipped = 0;
while (stack_begin < stack_end &&
!IsAccessibleMemoryRange(stack_begin, 1)) {
skipped++;
stack_begin += page_size;
}
LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
skipped, stack_begin, stack_end);
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@ -236,6 +256,17 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
}
if (dtls) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
if (dtls_beg < dtls_end) {
LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable);
}
}
}
}
}
}
}
@ -414,6 +445,11 @@ static bool CheckForLeaks() {

if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
Report(
"HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();
@ -615,6 +651,13 @@ uptr LeakReport::UnsuppressedLeakCount() {
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan; // NOLINT
@ -680,18 +723,14 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
__lsan::disable_counter++;
__lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
if (!__lsan::disable_counter && common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
__lsan::disable_counter--;
__lsan::EnableInThisThread();
#endif
}

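Note: a hedged usage sketch of the disable/enable pair above (the pair is declared in sanitizer/lsan_interface.h; the example is not part of the patch):

#include <sanitizer/lsan_interface.h>

void MakeIntentionalGlobal() {
  __lsan_disable();
  void *g = malloc(64);  // allocations made here are ignored by leak detection
  __lsan_enable();
  (void)g;
}
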
@ -29,6 +29,7 @@

namespace __sanitizer {
class FlagParser;
struct DTLS;
}

namespace __lsan {
@ -116,6 +117,16 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
void DisableInThisThread();
void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
struct ScopedInterceptorDisabler {
ScopedInterceptorDisabler() { DisableInThisThread(); }
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};

// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate one word for the array size (0) and store a pointer
// to the end of allocated chunk.
@ -139,8 +150,8 @@ bool WordIsPoisoned(uptr addr);
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end);
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread

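Note: a hedged sketch of the RAII type introduced above (internal to the runtime; it mirrors the use in the pthread_create interceptor later in this diff):

{
  __lsan::ScopedInterceptorDisabler disabler;
  // Allocations made by this thread while `disabler` is alive are
  // excluded from leak reports.
  void *scratch = malloc(4096);
  (void)scratch;
}
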
@ -24,9 +24,8 @@
namespace __lsan {

static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);

static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;

static bool IsLinker(const char* full_name) {
@ -34,20 +33,24 @@ static bool IsLinker(const char* full_name) {
}

void InitializePlatformSpecificModules() {
internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
uptr num_matches = GetListOfModules(
reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
if (num_matches == 1) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
return;
ListOfModules modules;
modules.init();
for (LoadedModule &module : modules) {
if (!IsLinker(module.full_name())) continue;
if (linker == nullptr) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
*linker = module;
module = LoadedModule();
} else {
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker->clear();
linker = nullptr;
return;
}
}
if (num_matches == 0)
VReport(1, "LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
else if (num_matches > 1)
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker = nullptr;
VReport(1, "LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
}

static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
@ -66,7 +69,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
CHECK_LT(allocator_end, end);
CHECK_LE(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
@ -98,6 +101,7 @@ static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
struct ProcessPlatformAllocParam {
Frontier *frontier;
StackDepotReverseMap *stack_depot_reverse_map;
bool skip_linker_allocations;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated as
@ -115,7 +119,8 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
// it as reachable, as we can't properly report its allocation stack anyway.
if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
if (caller_pc == 0 || (param->skip_linker_allocations &&
linker->containsAddress(caller_pc))) {
m.set_tag(kReachable);
param->frontier->push_back(chunk);
}
@ -140,10 +145,12 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
StackDepotReverseMap stack_depot_reverse_map;
ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
ProcessPlatformAllocParam arg;
arg.frontier = frontier;
arg.stack_depot_reverse_map = &stack_depot_reverse_map;
arg.skip_linker_allocations =
flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}

@ -32,6 +32,10 @@ LSAN_FLAG(bool, use_tls, true,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
LSAN_FLAG(bool, use_ld_allocations, true,
"Root set: mark as reachable all allocations made from dynamic "
"linker. This was the old way to handle dynamic TLS, and will "
"be removed soon. Do not use this flag.")

LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,

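Note: like the other LSAN_FLAG entries, the new use_ld_allocations flag is toggled at startup through the environment, e.g. running a program under LSAN_OPTIONS=use_ld_allocations=0 exercises the new DTLS-based handling directly.
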
@ -18,8 +18,10 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"

using namespace __lsan;
@ -102,6 +104,14 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
return 0;
}

INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
DTLS_on_libc_memalign(res, size);
return res;
}

INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
@ -172,11 +182,6 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}

// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
ALIAS(WRAPPER_NAME(memalign));

///// Thread initialization and finalization. /////

static unsigned g_thread_finalize_key;
@ -235,7 +240,15 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
int res;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
ScopedInterceptorDisabler disabler;
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
CHECK_NE(tid, 0);

@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"

namespace __lsan {
@ -33,7 +34,7 @@ static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;

void InitializeThreadRegistry() {
static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
@ -47,18 +48,20 @@ void SetCurrentThread(u32 tid) {
}

ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0) {}
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0),
dtls_(nullptr) {}

struct OnStartedArgs {
uptr stack_begin, stack_end,
cache_begin, cache_end,
tls_begin, tls_end;
DTLS *dtls;
};

void ThreadContext::OnStarted(void *arg) {
@ -69,10 +72,12 @@ void ThreadContext::OnStarted(void *arg) {
tls_end_ = args->tls_end;
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
dtls_ = args->dtls;
}

void ThreadContext::OnFinished() {
AllocatorThreadFinish();
DTLS_Destroy();
}

u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
@ -89,6 +94,7 @@ void ThreadStart(u32 tid, uptr os_id) {
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
thread_registry->StartThread(tid, os_id, &args);
}

@ -129,8 +135,8 @@ void EnsureMainThreadIDIsCorrect() {
///// Interface to the common LSan module. /////

bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
thread_registry->FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
@ -140,6 +146,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
*tls_end = context->tls_end();
*cache_begin = context->cache_begin();
*cache_end = context->cache_end();
*dtls = context->dtls();
return true;
}

@ -15,6 +15,10 @@

#include "sanitizer_common/sanitizer_thread_registry.h"

namespace __sanitizer {
struct DTLS;
}

namespace __lsan {

class ThreadContext : public ThreadContextBase {
@ -28,10 +32,13 @@ class ThreadContext : public ThreadContextBase {
uptr tls_end() { return tls_end_; }
uptr cache_begin() { return cache_begin_; }
uptr cache_end() { return cache_end_; }
DTLS *dtls() { return dtls_; }

 private:
uptr stack_begin_, stack_end_,
cache_begin_, cache_end_,
tls_begin_, tls_end_;
DTLS *dtls_;
};

void InitializeThreadRegistry();

@ -72,6 +72,10 @@ merge lib/sanitizer_common sanitizer_common
merge lib/interception interception
merge lib/ubsan ubsan

# Need to merge lib/builtins/assembly.h file:
mkdir -p builtins
cp -v upstream/lib/builtins/assembly.h builtins/assembly.h

rm -rf upstream

# Update the MERGE file.

@ -32,6 +32,7 @@ sanitizer_common_files = \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_linux_s390.cc \
sanitizer_mac.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
@ -55,6 +56,7 @@ sanitizer_common_files = \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_termination.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_unwind_linux_libcdep.cc \
@ -62,6 +64,9 @@ sanitizer_common_files = \


libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S
libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)

# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and

@ -79,7 +79,7 @@ CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__DEPENDENCIES_1 =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
sanitizer_coverage_mapping_libcdep.lo \
@ -87,8 +87,8 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_flag_parser.lo sanitizer_libc.lo \
sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_persistent_allocator.lo \
sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
sanitizer_mac.lo sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
@ -102,15 +102,20 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
sanitizer_win.lo
sanitizer_symbolizer_win.lo sanitizer_termination.lo \
sanitizer_thread_registry.lo sanitizer_tls_get_addr.lo \
sanitizer_unwind_linux_libcdep.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/../depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
LTCPPASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
@ -120,7 +125,17 @@ CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
SOURCES = $(libsanitizer_common_la_SOURCES)
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
CCLD = $(CC)
LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
SOURCES = $(libsanitizer_common_la_SOURCES) \
$(EXTRA_libsanitizer_common_la_SOURCES)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
@ -198,6 +213,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
@ -296,6 +312,7 @@ sanitizer_common_files = \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_linux_s390.cc \
sanitizer_mac.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
@ -319,12 +336,16 @@ sanitizer_common_files = \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_termination.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc

libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S
libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)

# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
@ -368,7 +389,7 @@ MAKEOVERRIDES =
all: all-am

.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
.SUFFIXES: .S .cc .lo .o .obj
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
@ -430,6 +451,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_mips64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_x86_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@ -453,11 +477,33 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_termination.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@

.S.o:
@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ $<

.S.obj:
@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`

.S.lo:
@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCCAS_FALSE@ $(LTCPPASCOMPILE) -c -o $@ $<

.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po

@ -11,39 +11,72 @@
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
return LIBC_MALLOC(size);
#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
#else
// Windows does not provide __libc_memalign/posix_memalign. It provides
// __aligned_malloc, but the allocated blocks can't be passed to free,
// they need to be passed to __aligned_free. InternalAlloc interface does
// not account for such a requirement. Alignment does not seem to be used
// anywhere in runtime, so just call __libc_malloc for now.
DCHECK_EQ(alignment, 0);
return __libc_malloc(size);
#endif
}

static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
(void)cache;
return __libc_realloc(ptr, size);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
LIBC_FREE(ptr);
__libc_free(ptr);
}

InternalAllocator *internal_allocator() {
return 0;
}

#else // SANITIZER_GO
#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@ -66,13 +99,26 @@ InternalAllocator *internal_allocator() {
return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
if (alignment == 0) alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
false);
return internal_allocator()->Allocate(&internal_allocator_cache, size,
alignment, false);
}
return internal_allocator()->Allocate(cache, size, 8, false);
return internal_allocator()->Allocate(cache, size, alignment, false);
}

static void *RawInternalRealloc(void *ptr, uptr size,
InternalAllocatorCache *cache) {
uptr alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
size, alignment);
}
return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
@ -83,20 +129,42 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}

#endif // SANITIZER_GO
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)

const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache);
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
if (!p)
return nullptr;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
if (!addr)
return InternalAlloc(size, cache);
if (size + sizeof(u64) < size)
return nullptr;
addr = (char*)addr - sizeof(u64);
size = size + sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
void *p = RawInternalRealloc(addr, size, cache);
if (!p)
return nullptr;
return (char*)p + sizeof(u64);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
@ -138,7 +206,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}

void NORETURN ReportAllocatorCannotReturnNull() {
static atomic_uint8_t reporting_out_of_memory = {0};

bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }

void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
[File diff suppressed because it is too large]
libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h (new file, 100 lines)
@ -0,0 +1,100 @@
//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
 public:
void TestOnlyInit() {
internal_memset(map_, 0, sizeof(map_));
}

void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize);
CHECK_EQ(0U, map_[idx]);
map_[idx] = val;
}
u8 operator[] (uptr idx) {
CHECK_LT(idx, kSize);
// FIXME: CHECK may be too expensive here.
return map_[idx];
}
 private:
u8 map_[kSize];
};

// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
// It is implemented as a two-dimensional array: array of kSize1 pointers
// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
class TwoLevelByteMap {
 public:
void TestOnlyInit() {
internal_memset(map1_, 0, sizeof(map1_));
mu_.Init();
}

void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i);
if (!p) continue;
MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
UnmapOrDie(p, kSize2);
}
}

uptr size() const { return kSize1 * kSize2; }
uptr size1() const { return kSize1; }
uptr size2() const { return kSize2; }

void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = GetOrCreate(idx / kSize2);
CHECK_EQ(0U, map2[idx % kSize2]);
map2[idx % kSize2] = val;
}

u8 operator[] (uptr idx) const {
CHECK_LT(idx, kSize1 * kSize2);
u8 *map2 = Get(idx / kSize2);
if (!map2) return 0;
return map2[idx % kSize2];
}

 private:
u8 *Get(uptr idx) const {
CHECK_LT(idx, kSize1);
return reinterpret_cast<u8 *>(
atomic_load(&map1_[idx], memory_order_acquire));
}

u8 *GetOrCreate(uptr idx) {
u8 *res = Get(idx);
if (!res) {
SpinMutexLock l(&mu_);
if (!(res = Get(idx))) {
res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
memory_order_release);
}
}
return res;
}

atomic_uintptr_t map1_[kSize1];
StaticSpinMutex mu_;
};
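
Note: a hedged, test-style usage sketch of TwoLevelByteMap (both maps are internal to sanitizer_allocator.h; the example is not part of the patch):

// 1024 x 1024 = 2^20 addressable indices; each 1024-byte leaf is mmaped on
// first use, so sparse index spaces stay cheap.
TwoLevelByteMap<1024, 1024> m;
m.TestOnlyInit();
m.set(123456, 42);        // leaf 123456/1024 == 120 gets created here
CHECK_EQ(m[123456], 42);
CHECK_EQ(m[7], 0);        // untouched leaves read as zero
m.TestOnlyUnmap();
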
libsanitizer/sanitizer_common/sanitizer_allocator_combined.h (new file, 209 lines)
@ -0,0 +1,209 @@
//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
 public:
void InitCommon(bool may_return_null) {
primary_.Init();
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}

void InitLinkerInitialized(bool may_return_null) {
secondary_.InitLinkerInitialized(may_return_null);
stats_.InitLinkerInitialized();
InitCommon(may_return_null);
}

void Init(bool may_return_null) {
secondary_.Init(may_return_null);
stats_.Init();
InitCommon(may_return_null);
}

void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
bool cleared = false, bool check_rss_limit = false) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
bool from_primary = primary_.CanAllocate(size, alignment);
if (from_primary)
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, size, alignment);
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res && from_primary)
internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res;
}

bool MayReturnNull() const {
return atomic_load(&may_return_null_, memory_order_acquire);
}

void *ReturnNullOrDieOnBadRequest() {
if (MayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull(false);
}

void *ReturnNullOrDieOnOOM() {
if (MayReturnNull()) return nullptr;
ReportAllocatorCannotReturnNull(true);
}

void SetMayReturnNull(bool may_return_null) {
secondary_.SetMayReturnNull(may_return_null);
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}

bool RssLimitIsExceeded() {
return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
}

void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
memory_order_release);
}

void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
else
secondary_.Deallocate(&stats_, p);
}

void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
uptr alignment) {
if (!p)
return Allocate(cache, new_size, alignment);
if (!new_size) {
Deallocate(cache, p);
return nullptr;
}
CHECK(PointerIsMine(p));
uptr old_size = GetActuallyAllocatedSize(p);
uptr memcpy_size = Min(new_size, old_size);
void *new_p = Allocate(cache, new_size, alignment);
if (new_p)
internal_memcpy(new_p, p, memcpy_size);
Deallocate(cache, p);
return new_p;
}

bool PointerIsMine(void *p) {
if (primary_.PointerIsMine(p))
return true;
return secondary_.PointerIsMine(p);
}

bool FromPrimary(void *p) {
return primary_.PointerIsMine(p);
}

void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetMetaData(p);
return secondary_.GetMetaData(p);
}

void *GetBlockBegin(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBegin(p);
}

// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBeginFastLocked(p);
}

uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
return secondary_.GetActuallyAllocatedSize(p);
}

uptr TotalMemoryUsed() {
return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
}

void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

void InitCache(AllocatorCache *cache) {
cache->Init(&stats_);
}

void DestroyCache(AllocatorCache *cache) {
cache->Destroy(&primary_, &stats_);
}

void SwallowCache(AllocatorCache *cache) {
cache->Drain(&primary_);
}

void GetStats(AllocatorStatCounters s) const {
stats_.Get(s);
}

void PrintStats() {
primary_.PrintStats();
secondary_.PrintStats();
}

// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
void ForceLock() {
primary_.ForceLock();
secondary_.ForceLock();
}

void ForceUnlock() {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}

void ReleaseToOS() { primary_.ReleaseToOS(); }

// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
primary_.ForEachChunk(callback, arg);
secondary_.ForEachChunk(callback, arg);
}

 private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
atomic_uint8_t may_return_null_;
atomic_uint8_t rss_limit_is_exceeded_;
};
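
Note: a hedged sketch of how the three template parameters compose (the typedefs mirror the LSan allocator earlier in this diff; the example is not part of the patch):

typedef SizeClassAllocator64<AP64> Primary;                  // fast path
typedef SizeClassAllocatorLocalCache<Primary> Cache;         // per-thread
typedef LargeMmapAllocator<> Secondary;                      // slow path
typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;

// Allocator allocator; Cache cache;
// allocator.Init(/*may_return_null=*/false);
// void *p = allocator.Allocate(&cache, 128, /*alignment=*/8);
// allocator.Deallocate(&cache, p);
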
@@ -27,10 +27,18 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();

SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
    void (*malloc_hook)(const void *, uptr),
    void (*free_hook)(const void *));

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_free_hook(void *ptr);


SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_print_memory_profile(int top_percent);
}  // extern "C"

#endif  // SANITIZER_ALLOCATOR_INTERFACE_H
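The hook-installation entry point above can be driven from client code; a hedged example (assuming the usual compiler-rt public header <sanitizer/allocator_interface.h>, which declares these functions; counter names are illustrative):

#include <sanitizer/allocator_interface.h>
#include <stdio.h>
#include <stdlib.h>

static size_t g_mallocs, g_frees;  // simple counters bumped by the hooks

static void my_malloc_hook(const void *ptr, size_t size) { g_mallocs++; }
static void my_free_hook(const void *ptr) { g_frees++; }

int main() {
  __sanitizer_install_malloc_and_free_hooks(my_malloc_hook, my_free_hook);
  free(malloc(128));  // fires my_malloc_hook, then my_free_hook
  printf("mallocs=%zu frees=%zu\n", g_mallocs, g_frees);
  return 0;
}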
@@ -43,7 +43,12 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
                          LargeMmapAllocator<> > InternalAllocator;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr);
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                    uptr alignment = 0);
void *InternalRealloc(void *p, uptr size,
                      InternalAllocatorCache *cache = nullptr);
void *InternalCalloc(uptr countr, uptr size,
                     InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
InternalAllocator *internal_allocator();

@@ -54,8 +59,8 @@ enum InternalAllocEnum {
}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          InternalAllocEnum) {
  return InternalAlloc(size);
                          __sanitizer::InternalAllocEnum) {
  return __sanitizer::InternalAlloc(size);
}

#endif  // SANITIZER_ALLOCATOR_INTERNAL_H
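The hunk above adds an alignment parameter to InternalAlloc; the call pattern for over-aligned internal buffers follows directly from the new declaration (a sketch, values illustrative, inside namespace __sanitizer):

// A 4096-byte internal buffer aligned to 64 bytes, allocated without a
// thread-local cache and released the same way.
void *buf = InternalAlloc(4096, /*cache=*/nullptr, /*alignment=*/64);
// ... use buf ...
InternalFree(buf);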
246  libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h  Normal file
@@ -0,0 +1,246 @@
//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(c, allocator, class_id);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(c, allocator, class_id, c->count);
    }
  }

  // private:
  struct PerClass {
    u32 count;
    u32 max_count;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
    }
  }

  NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
    allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                num_requested_chunks);
    c->count = num_requested_chunks;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;
  static const uptr kNumClasses = SizeClassAllocator::kNumClasses;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0))
      Refill(allocator, class_id);
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    InitCache();
    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
    PerClass *c = &per_class_[class_id];
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
      PerClass *c = &per_class_[class_id];
      while (c->count > 0)
        Drain(allocator, class_id);
    }
  }

  // private:
  typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
  struct PerClass {
    uptr count;
    uptr max_count;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (per_class_[1].max_count)
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * TransferBatch::MaxCached(i);
    }
  }

  // TransferBatch class is declared in SizeClassAllocator.
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we may use one of the chunks to store the batch.
  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
  static uptr SizeClassForTransferBatch(uptr class_id) {
    if (Allocator::ClassIdToSize(class_id) <
        TransferBatch::AllocationSizeRequiredForNElements(
            TransferBatch::MaxCached(class_id)))
      return SizeClassMap::ClassID(sizeof(TransferBatch));
    return 0;
  }

  // Returns a TransferBatch suitable for class_id.
  // For small size classes allocates the batch from the allocator.
  // For large size classes simply returns b.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  // For small size classes deallocates b to the allocator.
  // Does nothing for large size classes.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
      Deallocate(allocator, batch_class_id, b);
  }

  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
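A hedged sketch of how these caches sit in front of a shared primary allocator (ExamplePrimary is an assumed SizeClassAllocator64 instantiation; Init/stats registration elided for brevity):

static ExamplePrimary shared_allocator;                      // one per process
static THREADLOCAL ExamplePrimary::AllocatorCache tl_cache;  // one per thread

void *FastAlloc(uptr size) {
  uptr class_id = ExamplePrimary::SizeClassMapT::ClassID(size);
  return tl_cache.Allocate(&shared_allocator, class_id);  // TLS fast path
}

void FastFree(void *p) {
  tl_cache.Deallocate(&shared_allocator, shared_allocator.GetSizeClass(p), p);
}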
302  libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h  Normal file
@@ -0,0 +1,302 @@
//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(sizeof(TransferBatch) ==
                 SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty())
      PopulateFreeList(stat, c, sci, class_id);
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->Count(), 0);
    sci->free_list.push_front(b);
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {
  }

  static uptr AdditionalSize() {
    return 0;
  }

  // This is empty here. Currently only implemented in 64-bit allocator.
  void ReleaseToOS() { }


  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                      "SizeClassAllocator32"));
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        CHECK_GT(b->Count(), 0);
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
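Because regions are kRegionSize-aligned, PointerIsMine() and GetSizeClass() reduce to bit arithmetic on the raw address; a worked example with illustrative parameter values:

// Assume kRegionSizeLog = 20 (1 MiB regions) and p = 0x12345678:
//   ComputeRegionId(p)  = p >> 20        = 0x123       (ByteMap index)
//   ComputeRegionBeg(p) = p & ~0xFFFFF   = 0x12300000  (region start)
// possible_regions[0x123] then holds the size class of every chunk in that
// region; a value of 0 means the region is not used by the allocator.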
503  libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h  Normal file
@@ -0,0 +1,503 @@
//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;

// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.

// FreeArray is an array of free-d chunks (stored as 4-byte offsets)
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray

struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <class Params>
class SizeClassAllocator64 {
 public:
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }

  void Init() {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    if (kUsingConstantSpaceBeg) {
      CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
                              MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
    } else {
      NonConstSpaceBeg =
          reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
    }
    MapWithCallback(SpaceEnd(), AdditionalSize());
  }

  void MapWithCallback(uptr beg, uptr size) {
    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
        alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->n_freed += n_chunks;
  }

  NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    BlockingMutexLock l(&region->mutex);
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      PopulateFreeArray(stat, class_id, region,
                        n_chunks - region->num_freed_chunks);
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->n_allocated += n_chunks;
  }


  bool PointerIsMine(const void *p) {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
        space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    if (class_id >= kNumClasses) return nullptr;
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
                                uptr stats_size) {
    for (uptr class_id = 0; class_id < stats_size; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->n_allocated - region->n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "  %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
        "num_freed_chunks %zd"
        " avail: %zd rss: %zdK releases: %zd\n",
        class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
        region->n_allocated, region->n_freed, in_use,
        region->num_freed_chunks, avail_chunks, rss >> 10,
        region->rtoi.num_releases);
  }

  void PrintStats() {
    uptr total_mapped = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      total_mapped += region->mapped_user;
      n_allocated += region->n_allocated;
      n_freed += region->n_freed;
    }
    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
           "remains %zd\n",
           total_mapped >> 20, n_allocated, n_allocated - n_freed);
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      for (uptr chunk = region_beg;
           chunk < region_beg + region->allocated_user;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  void ReleaseToOS() {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      ReleaseToOS(class_id);
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

 private:
  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize must be >= 2^32.
  COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 16;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 16;
  // Granularity of ReleaseToOs (aka madvise).
  static const uptr kReleaseToOsGranularity = 1 << 12;

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
  };

  struct RegionInfo {
    BlockingMutex mutex;
    uptr num_freed_chunks;  // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    uptr n_allocated, n_freed;  // Just stats.
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

  u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
    return (*state = *state * 1103515245 + 12345) >> 16;
  }

  u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)

  void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
    if (n <= 1) return;
    for (u32 i = n - 1; i > 0; i--)
      Swap(a[i], a[RandN(rand_state, i + 1)]);
  }

  RegionInfo *GetRegionInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    RegionInfo *regions =
        reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) {
    return region_beg + kRegionSize - kFreeArraySize;
  }

  uptr GetChunkIdx(uptr chunk, uptr size) {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  CompactPtrT *GetFreeArray(uptr region_beg) {
    return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
                                           kFreeArraySize);
  }

  void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      CHECK_LE(needed_space, kFreeArraySize);
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      MapWithCallback(current_map_end, new_map_size);
      region->mapped_free_array = new_mapped_free_array;
    }
  }


  NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    uptr size = ClassIdToSize(class_id);
    uptr beg_idx = region->allocated_user;
    uptr end_idx = beg_idx + requested_count * size;
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    if (end_idx > region->mapped_user) {
      if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
        region->rand_state = static_cast<u32>(region_beg >> 12);  // From ASLR.
      // Do the mmap for the user memory.
      uptr map_size = kUserMapSize;
      while (end_idx > region->mapped_user + map_size)
        map_size += kUserMapSize;
      CHECK_GE(region->mapped_user + map_size, end_idx);
      MapWithCallback(region_beg + region->mapped_user, map_size);
      stat->Add(AllocatorStatMapped, map_size);
      region->mapped_user += map_size;
    }
    CompactPtrT *free_array = GetFreeArray(region_beg);
    uptr total_count = (region->mapped_user - beg_idx) / size;
    uptr num_freed_chunks = region->num_freed_chunks;
    EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
    for (uptr i = 0; i < total_count; i++) {
      uptr chunk = beg_idx + i * size;
      free_array[num_freed_chunks + total_count - 1 - i] =
          PointerToCompactPtr(0, chunk);
    }
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[num_freed_chunks], total_count,
                    &region->rand_state);
    region->num_freed_chunks += total_count;
    region->allocated_user += total_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);

    region->allocated_meta += total_count * kMetadataSize;
    if (region->allocated_meta > region->mapped_meta) {
      uptr map_size = kMetaMapSize;
      while (region->allocated_meta > region->mapped_meta + map_size)
        map_size += kMetaMapSize;
      // Do the mmap for the metadata.
      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
      MapWithCallback(GetMetadataEnd(region_beg) -
                      region->mapped_meta - map_size, map_size);
      region->mapped_meta += map_size;
    }
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    if (region->mapped_user + region->mapped_meta >
        kRegionSize - kFreeArraySize) {
      Printf("%s: Out of memory. Dying. ", SanitizerToolName);
      Printf("The process has exhausted %zuMB for size class %zu.\n",
             kRegionSize / 1024 / 1024, size);
      Die();
    }
  }

  bool MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
                              CompactPtrT first, CompactPtrT last) {
    uptr beg_ptr = CompactPtrToPointer(region_beg, first);
    uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
    CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity);
    beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity);
    end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity);
    if (end_ptr == beg_ptr) return false;
    ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr);
    return true;
  }

  // Releases some RAM back to OS.
  // Algorithm:
  // * Lock the region.
  // * Sort the chunks.
  // * Find ranges fully covered by free-d chunks
  // * Release them to OS with madvise.
  //
  // TODO(kcc): make sure we don't do it too frequently.
  void ReleaseToOS(uptr class_id) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);
    uptr chunk_size = ClassIdToSize(class_id);
    uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
    const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
    BlockingMutexLock l(&region->mutex);
    uptr n = region->num_freed_chunks;
    if (n * chunk_size < kReleaseToOsGranularity)
      return;  // No chance to release anything.
    if ((region->rtoi.n_freed_at_last_release - region->n_freed) * chunk_size <
        kReleaseToOsGranularity)
      return;  // Nothing new to release.
    SortArray(free_array, n);
    uptr beg = free_array[0];
    uptr prev = free_array[0];
    for (uptr i = 1; i < n; i++) {
      uptr chunk = free_array[i];
      CHECK_GT(chunk, prev);
      if (chunk - prev != scaled_chunk_size) {
        CHECK_GT(chunk - prev, scaled_chunk_size);
        if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
          MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
          region->rtoi.n_freed_at_last_release = region->n_freed;
          region->rtoi.num_releases++;
        }
        beg = chunk;
      }
      prev = chunk;
    }
  }
};
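The CompactPtrT encoding above packs a 64-bit pointer into a 4-byte region offset; a worked round-trip with illustrative numbers (chunk sizes are multiples of 16 with the usual kMinSizeLog of 4, so the low 4 bits carry no information):

// base = 0x600000000000 (region begin), ptr = 0x600000001230:
//   compact = (ptr - base) >> 4          = 0x123
//   ptr'    = base + (uptr(0x123) << 4)  = 0x600000001230  (round-trips)
// Dropping 4 bits is also what the COMPILER_CHECKs rely on: a region of up
// to 2^36 bytes still fits the 32-bit CompactPtrT after the >> 4.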
271  libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h  Normal file
@@ -0,0 +1,271 @@
//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
 public:
  void InitLinkerInitialized(bool may_return_null) {
    page_size_ = GetPageSizeCached();
    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
  }

  void Init(bool may_return_null) {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized(may_return_null);
  }

  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
    CHECK(IsPowerOfTwo(alignment));
    uptr map_size = RoundUpMapSize(size);
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
    if (map_size < size) return ReturnNullOrDieOnBadRequest();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    CHECK(IsAligned(map_beg, page_size_));
    MapUnmapCallback().OnMap(map_beg, map_size);
    uptr map_end = map_beg + map_size;
    uptr res = map_beg + page_size_;
    if (res & (alignment - 1))  // Align.
      res += alignment - (res & (alignment - 1));
    CHECK(IsAligned(res, alignment));
    CHECK(IsAligned(res, page_size_));
    CHECK_GE(res + size, map_beg);
    CHECK_LE(res + size, map_end);
    Header *h = GetHeader(res);
    h->size = size;
    h->map_beg = map_beg;
    h->map_size = map_size;
    uptr size_log = MostSignificantSetBitIndex(map_size);
    CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
    {
      SpinMutexLock l(&mutex_);
      uptr idx = n_chunks_++;
      chunks_sorted_ = false;
      CHECK_LT(idx, kMaxNumChunks);
      h->chunk_idx = idx;
      chunks_[idx] = h;
      stats.n_allocs++;
      stats.currently_allocated += map_size;
      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
      stats.by_size_log[size_log]++;
      stat->Add(AllocatorStatAllocated, map_size);
      stat->Add(AllocatorStatMapped, map_size);
    }
    return reinterpret_cast<void*>(res);
  }

  bool MayReturnNull() const {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

  void *ReturnNullOrDieOnBadRequest() {
    if (MayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull(false);
  }

  void *ReturnNullOrDieOnOOM() {
    if (MayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool may_return_null) {
    atomic_store(&may_return_null_, may_return_null, memory_order_release);
  }

  void Deallocate(AllocatorStats *stat, void *p) {
    Header *h = GetHeader(p);
    {
      SpinMutexLock l(&mutex_);
      uptr idx = h->chunk_idx;
      CHECK_EQ(chunks_[idx], h);
      CHECK_LT(idx, n_chunks_);
      chunks_[idx] = chunks_[n_chunks_ - 1];
      chunks_[idx]->chunk_idx = idx;
      n_chunks_--;
      chunks_sorted_ = false;
      stats.n_frees++;
      stats.currently_allocated -= h->map_size;
      stat->Sub(AllocatorStatAllocated, h->map_size);
      stat->Sub(AllocatorStatMapped, h->map_size);
    }
    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
    UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
  }

  uptr TotalMemoryUsed() {
    SpinMutexLock l(&mutex_);
    uptr res = 0;
    for (uptr i = 0; i < n_chunks_; i++) {
      Header *h = chunks_[i];
      CHECK_EQ(h->chunk_idx, i);
      res += RoundUpMapSize(h->size);
    }
    return res;
  }

  bool PointerIsMine(const void *p) {
    return GetBlockBegin(p) != nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    return RoundUpTo(GetHeader(p)->size, page_size_);
  }

  // At least page_size_/2 metadata bytes is available.
  void *GetMetaData(const void *p) {
    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
    if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
      Printf("%s: bad pointer %p\n", SanitizerToolName, p);
      CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
    }
    return GetHeader(p) + 1;
  }

  void *GetBlockBegin(const void *ptr) {
    uptr p = reinterpret_cast<uptr>(ptr);
    SpinMutexLock l(&mutex_);
    uptr nearest_chunk = 0;
    // Cache-friendly linear search.
    for (uptr i = 0; i < n_chunks_; i++) {
      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
      if (p < ch) continue;  // p is to the left of this chunk, skip it.
      if (p - ch < p - nearest_chunk)
        nearest_chunk = ch;
    }
    if (!nearest_chunk)
      return nullptr;
    Header *h = reinterpret_cast<Header *>(nearest_chunk);
    CHECK_GE(nearest_chunk, h->map_beg);
    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
    CHECK_LE(nearest_chunk, p);
    if (h->map_beg + h->map_size <= p)
      return nullptr;
    return GetUser(h);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    if (!chunks_sorted_) {
      // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
      SortArray(reinterpret_cast<uptr*>(chunks_), n);
      for (uptr i = 0; i < n; i++)
        chunks_[i]->chunk_idx = i;
      chunks_sorted_ = true;
      min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
      max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
          chunks_[n - 1]->map_size;
    }
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks_[mid]))
        end = mid - 1;  // We are not interested in chunks_[mid].
      else
        beg = mid;  // chunks_[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks_[end]))
        beg = end;
    }

    Header *h = chunks_[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h);
  }

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    mutex_.Lock();
  }

  void ForceUnlock() {
    mutex_.Unlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr i = 0; i < n_chunks_; i++)
      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
  }

 private:
  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(Header *h) {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header *chunks_[kMaxNumChunks];
  uptr n_chunks_;
  uptr min_mmap_, max_mmap_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  atomic_uint8_t may_return_null_;
  SpinMutex mutex_;
};
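LargeMmapAllocator keeps its per-chunk Header on the page just in front of the user block, which is why RoundUpMapSize() reserves one extra page; a layout sketch (illustrative, page size assumed 4 KiB):

// map_beg              map_beg + page_size_  == pointer returned to the user
//   |--- page holding Header ---|------ user block, h->size bytes ------|
//
// GetHeader(p) = (Header *)(p - page_size_)   // p must be page-aligned
// GetUser(h)   = (void *)((uptr)h + page_size_)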
215  libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h  Normal file
@@ -0,0 +1,215 @@
|
||||
//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Part of the Sanitizer Allocator.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
#ifndef SANITIZER_ALLOCATOR_H
|
||||
#error This file must be included inside sanitizer_allocator.h
|
||||
#endif
|
||||
|
||||
// SizeClassMap maps allocation sizes into size classes and back.
|
||||
// Class 0 always corresponds to size 0.
|
||||
// The other sizes are controlled by the template parameters:
|
||||
// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
|
||||
// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
|
||||
// kMidSizeLog: the classes starting from 1 increase with step
|
||||
// 2^kMinSizeLog until 2^kMidSizeLog.
|
||||
// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
|
||||
// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
|
||||
// look like 0b1xx0..0, where x is either 0 or 1.
|
||||
//
|
||||
// Example: kNumBits=3, kMidSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
|
||||
//
|
||||
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
|
||||
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
|
||||
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
|
||||
// ...
|
||||
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
|
||||
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
|
||||
//
|
||||
// This structure of the size class map gives us:
|
||||
// - Efficient table-free class-to-size and size-to-class functions.
|
||||
// - Difference between two consequent size classes is between 14% and 25%
|
||||
//
|
||||
// This class also gives a hint to a thread-caching allocator about the amount
|
||||
// of chunks that need to be cached per-thread:
|
||||
// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
// The actual number is computed in TransferBatch.
// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
//
//
// Another example (kNumBits=2):
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26

template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
          uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = kNumBits - 1;
  static const uptr M = (1 << S) - 1;

 public:
  // kMaxNumCachedHintT is a power of two. It serves as a hint
  // for the size of TransferBatch, the actual size could be a bit smaller.
  static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
  COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
  static const uptr kLargestClassID = kNumClasses - 2;
  COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses <= 32 ? 32 :
      kNumClasses <= 64 ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    if (size > kMaxSize) return 0;
    uptr l = MostSignificantSetBitIndex(size);
    uptr hbits = (size >> (l - S)) & M;
    uptr lbits = size & ((1 << (l - S)) - 1);
    uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCachedHint(uptr class_id) {
    if (class_id == 0) return 0;
    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
    return Max<uptr>(1, Min(kMaxNumCachedHint, n));
  }

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCachedHint(i) * s;
      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
             "cached: %zd %zd; id %zd\n",
             i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zd\n", total_cached);
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      CHECK_EQ(ClassID(s), c);
      if (c != kNumClasses - 1)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      if (c)
        CHECK_GT(Size(c), Size(c-1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c-1), s);
    }
  }
};

typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
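To make the table-free class-to-size mapping above concrete, here is a small self-contained sketch (not part of the merge) that mirrors Size() and ClassID() with the DefaultSizeClassMap parameters (kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17); all names below are local stand-ins for the sanitizer's internal helpers.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Standalone mirror of SizeClassMap<3, 4, 8, 17, ...> for illustration only.
static const uint64_t kMinSizeLog = 4, kMidSizeLog = 8, kMaxSizeLog = 17;
static const uint64_t kNumBits = 3;
static const uint64_t kMinSize = 1 << kMinSizeLog;       // 16
static const uint64_t kMidSize = 1 << kMidSizeLog;       // 256
static const uint64_t kMidClass = kMidSize / kMinSize;   // 16
static const uint64_t S = kNumBits - 1, M = (1 << S) - 1;

static uint64_t MostSignificantSetBitIndex(uint64_t x) {
  return 63 - __builtin_clzll(x);
}

static uint64_t Size(uint64_t class_id) {
  if (class_id <= kMidClass) return kMinSize * class_id;
  class_id -= kMidClass;
  uint64_t t = kMidSize << (class_id >> S);
  return t + (t >> S) * (class_id & M);
}

static uint64_t ClassID(uint64_t size) {
  if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
  if (size > (1ULL << kMaxSizeLog)) return 0;
  uint64_t l = MostSignificantSetBitIndex(size);
  uint64_t hbits = (size >> (l - S)) & M;
  uint64_t lbits = size & ((1ULL << (l - S)) - 1);
  return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
}

int main() {
  assert(ClassID(48) == 3);    // c03 covers sizes 33..48
  assert(Size(17) == 320);     // first class past kMidSize: 256 + 64
  assert(ClassID(300) == 17);  // 300 rounds up into the 320-byte class
  printf("ok\n");
}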
libsanitizer/sanitizer_common/sanitizer_allocator_stats.h (new file)
@ -0,0 +1,103 @@
//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatAllocated,
  AllocatorStatMapped,
  AllocatorStatCount
};

typedef uptr AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }
  void InitLinkerInitialized() {}

  void Add(AllocatorStat i, uptr v) {
    v += atomic_load(&stats_[i], memory_order_relaxed);
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Sub(AllocatorStat i, uptr v) {
    v = atomic_load(&stats_[i], memory_order_relaxed) - v;
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, uptr v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  uptr Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uintptr_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void InitLinkerInitialized() {
    next_ = this;
    prev_ = this;
  }
  void Init() {
    internal_memset(this, 0, sizeof(*this));
    InitLinkerInitialized();
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (;;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
    // All stats must be non-negative.
    for (int i = 0; i < AllocatorStatCount; i++)
      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
  }

 private:
  mutable SpinMutex mu_;
};
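A sketch of how a combined allocator is expected to wire these classes together, assuming the sanitizer-internal types declared above (not compilable on its own; the function names are illustrative):

// Sketch only: assumes the sanitizer-internal types declared above.
static AllocatorGlobalStats global_stats;  // InitLinkerInitialized() in real use

void ThreadCacheInit(AllocatorStats *thread_stats) {
  thread_stats->Init();
  global_stats.Register(thread_stats);  // links it into the global ring
}

void OnAllocate(AllocatorStats *thread_stats, uptr size) {
  thread_stats->Add(AllocatorStatAllocated, size);
}

uptr TotalAllocated() {
  AllocatorStatCounters totals;  // uptr[AllocatorStatCount]
  global_stats.Get(totals);      // walks the ring and sums per-thread values
  return totals[AllocatorStatAllocated];
}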
@ -40,3 +40,17 @@
# define CFI_DEF_CFA(reg, n)
# define CFI_RESTORE(reg)
#endif

#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
# define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_TSAN_SYMBOL(symbol) symbol
# define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) symbol
#else
# define ASM_HIDDEN(symbol)
# define ASM_TYPE_FUNCTION(symbol)
# define ASM_SIZE(symbol)
# define ASM_TSAN_SYMBOL(symbol) _##symbol
# define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
#endif
@ -31,6 +31,10 @@ extern "C" long _InterlockedExchange( // NOLINT
extern "C" long _InterlockedExchangeAdd( // NOLINT
    long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" char _InterlockedCompareExchange8( // NOLINT
    char volatile *Destination, // NOLINT
    char Exchange, char Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange8)
extern "C" short _InterlockedCompareExchange16( // NOLINT
    short volatile *Destination, // NOLINT
    short Exchange, short Comparand); // NOLINT
@ -169,8 +173,6 @@ INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
  return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}

#ifndef _WIN64

INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
@ -178,6 +180,10 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
#ifdef _WIN64
  u8 prev = (u8)_InterlockedCompareExchange8(
      (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
#else
  u8 prev;
  __asm {
    mov al, cmpv
@ -186,14 +192,13 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
    lock cmpxchg [ecx], dl
    mov prev, al
  }
#endif
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

#endif

INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//

#include "sanitizer_common.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
@ -22,13 +23,7 @@ namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";

atomic_uint32_t current_verbosity;

uptr GetPageSizeCached() {
  static uptr PageSize;
  if (!PageSize)
    PageSize = GetPageSize();
  return PageSize;
}
uptr PageSizeCached;

StaticSpinMutex report_file_mu;
ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
@ -103,70 +98,13 @@ uptr stoptheworld_tracer_pid = 0;
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;

static const int kMaxNumOfInternalDieCallbacks = 5;
static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];

bool AddDieCallback(DieCallbackType callback) {
  for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
    if (InternalDieCallbacks[i] == nullptr) {
      InternalDieCallbacks[i] = callback;
      return true;
    }
  }
  return false;
}

bool RemoveDieCallback(DieCallbackType callback) {
  for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
    if (InternalDieCallbacks[i] == callback) {
      internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
                       sizeof(InternalDieCallbacks[0]) *
                           (kMaxNumOfInternalDieCallbacks - i - 1));
      InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
      return true;
    }
  }
  return false;
}

static DieCallbackType UserDieCallback;
void SetUserDieCallback(DieCallbackType callback) {
  UserDieCallback = callback;
}

void NORETURN Die() {
  if (UserDieCallback)
    UserDieCallback();
  for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
    if (InternalDieCallbacks[i])
      InternalDieCallbacks[i]();
  }
  if (common_flags()->abort_on_error)
    Abort();
  internal__exit(common_flags()->exitcode);
}

static CheckFailedCallbackType CheckFailedCallback;
void SetCheckFailedCallback(CheckFailedCallbackType callback) {
  CheckFailedCallback = callback;
}

void NORETURN CheckFailed(const char *file, int line, const char *cond,
                          u64 v1, u64 v2) {
  if (CheckFailedCallback) {
    CheckFailedCallback(file, line, cond, v1, v2);
  }
  Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
         v1, v2);
  Die();
}

void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err) {
                                      const char *mmap_type, error_t err,
                                      bool raw_report) {
  static int recursion_count;
  if (recursion_count) {
  if (raw_report || recursion_count) {
    // If raw report is requested or we went into recursion, just die.
    // The Report() and CHECK calls below may call mmap recursively and fail.
    // If we went into recursion, just die.
    RawWrite("ERROR: Failed to mmap\n");
    Die();
  }
@ -174,7 +112,7 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
  Report("ERROR: %s failed to "
         "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
         SanitizerToolName, mmap_type, size, size, mem_type, err);
#ifndef SANITIZER_GO
#if !SANITIZER_GO
  DumpProcessMap();
#endif
  UNREACHABLE("unable to mmap");
@ -217,6 +155,7 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
}

typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);

template<class T>
static inline bool CompareLess(const T &a, const T &b) {
@ -227,25 +166,8 @@ void SortArray(uptr *array, uptr size) {
  InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}

// We want to map a chunk of address space aligned to 'alignment'.
// We do it by mapping a bit more and then unmapping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
  // uptr PageSize = GetPageSizeCached();
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));
  uptr map_size = size + alignment;
  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
  uptr map_end = map_res + map_size;
  uptr res = map_res;
  if (res & (alignment - 1))  // Not aligned.
    res = (map_res + alignment) & ~(alignment - 1);
  uptr end = res + size;
  if (res != map_res)
    UnmapOrDie((void*)map_res, res - map_res);
  if (end != map_end)
    UnmapOrDie((void*)end, map_end - end);
  return (void*)res;
void SortArray(u32 *array, uptr size) {
  InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess);
}

const char *StripPathPrefix(const char *filepath,
@ -283,7 +205,7 @@ void ReportErrorSummary(const char *error_message) {
  __sanitizer_report_error_summary(buff.data());
}

#ifndef SANITIZER_GO
#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
  if (!common_flags()->print_summary)
    return;
@ -295,6 +217,40 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
}
#endif

// Removes the ANSI escape sequences from the input string (in-place).
void RemoveANSIEscapeSequencesFromString(char *str) {
  if (!str)
    return;

  // We are going to remove the escape sequences in place.
  char *s = str;
  char *z = str;
  while (*s != '\0') {
    CHECK_GE(s, z);
    // Skip over ANSI escape sequences with pointer 's'.
    if (*s == '\033' && *(s + 1) == '[') {
      s = internal_strchrnul(s, 'm');
      if (*s == '\0') {
        break;
      }
      s++;
      continue;
    }
    // 's' now points at a character we want to keep. Copy over the buffer
    // content if the escape sequence has been previously skipped and advance
    // both pointers.
    if (s != z)
      *z = *s;

    // If we have not seen an escape sequence, just advance both pointers.
    z++;
    s++;
  }

  // Null terminate the string.
  *z = '\0';
}

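For illustration, a hypothetical caller (the buffer contents are invented):

// Sketch: strips the color codes in place.
char msg[] = "\033[1m\033[31mERROR\033[0m: boom";
RemoveANSIEscapeSequencesFromString(msg);
// msg now reads "ERROR: boom".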
void LoadedModule::set(const char *module_name, uptr base_address) {
  clear();
  full_name_ = internal_strdup(module_name);
@ -318,9 +274,8 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
}

bool LoadedModule::containsAddress(uptr address) const {
  for (Iterator iter = ranges(); iter.hasNext();) {
    const AddressRange *r = iter.next();
    if (r->beg <= address && address < r->end)
  for (const AddressRange &r : ranges()) {
    if (r.beg <= address && address < r.end)
      return true;
  }
  return false;
@ -387,6 +342,10 @@ bool TemplateMatch(const char *templ, const char *str) {
static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';

char *FindPathToBinary(const char *name) {
  if (FileExists(name)) {
    return internal_strdup(name);
  }

  const char *path = GetEnv("PATH");
  if (!path)
    return nullptr;
@ -451,6 +410,53 @@ uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
  return name_len;
}

void PrintCmdline() {
  char **argv = GetArgv();
  if (!argv) return;
  Printf("\nCommand: ");
  for (uptr i = 0; argv[i]; ++i)
    Printf("%s ", argv[i]);
  Printf("\n\n");
}

// Malloc hooks.
static const int kMaxMallocFreeHooks = 5;
struct MallocFreeHook {
  void (*malloc_hook)(const void *, uptr);
  void (*free_hook)(const void *);
};

static MallocFreeHook MFHooks[kMaxMallocFreeHooks];

void RunMallocHooks(const void *ptr, uptr size) {
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    auto hook = MFHooks[i].malloc_hook;
    if (!hook) return;
    hook(ptr, size);
  }
}

void RunFreeHooks(const void *ptr) {
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    auto hook = MFHooks[i].free_hook;
    if (!hook) return;
    hook(ptr);
  }
}

static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
                                  void (*free_hook)(const void *)) {
  if (!malloc_hook || !free_hook) return 0;
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    if (MFHooks[i].malloc_hook == nullptr) {
      MFHooks[i].malloc_hook = malloc_hook;
      MFHooks[i].free_hook = free_hook;
      return i + 1;
    }
  }
  return 0;
}

} // namespace __sanitizer

using namespace __sanitizer; // NOLINT
@ -460,6 +466,11 @@ void __sanitizer_set_report_path(const char *path) {
  report_file.SetReportPath(path);
}

void __sanitizer_set_report_fd(void *fd) {
  report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
  report_file.fd_pid = internal_getpid();
}

void __sanitizer_report_error_summary(const char *error_summary) {
  Printf("%s\n", error_summary);
}
@ -468,4 +479,18 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_death_callback(void (*callback)(void)) {
  SetUserDieCallback(callback);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
                                                                  uptr),
                                              void (*free_hook)(const void *)) {
  return InstallMallocFreeHooks(malloc_hook, free_hook);
}

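A minimal sketch of a client of this interface; the prototype below is copied from the declaration above (the exact spelling in the public headers may differ), and the hook bodies are invented:

#include <stddef.h>
#include <stdio.h>

extern "C" int __sanitizer_install_malloc_and_free_hooks(
    void (*malloc_hook)(const void *, size_t),
    void (*free_hook)(const void *));

static void MyMallocHook(const void *ptr, size_t size) {
  fprintf(stderr, "alloc %zu bytes at %p\n", size, ptr);
}
static void MyFreeHook(const void *ptr) {
  fprintf(stderr, "free %p\n", ptr);
}

int main() {
  // Returns a nonzero slot id on success, 0 when all 5 slots are taken
  // or a hook is null.
  return __sanitizer_install_malloc_and_free_hooks(MyMallocHook,
                                                   MyFreeHook) == 0;
}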
#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_print_memory_profile(int top_percent) {
  (void)top_percent;
}
#endif
} // extern "C"

@ -21,7 +21,7 @@
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#ifdef _MSC_VER
#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
@ -42,11 +42,10 @@ const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kMaxPathLength = 4096;

// 16K loaded modules should be enough for everyone.
static const uptr kMaxNumberOfModules = 1 << 14;

const uptr kMaxThreadStackSize = 1 << 30; // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;
@ -62,7 +61,12 @@ INLINE int Verbosity() {
}

uptr GetPageSize();
uptr GetPageSizeCached();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
@ -74,22 +78,30 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type);
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range. Use MmapNoAccess to allocate an
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// inaccessible region of memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
void ReleaseMemoryToOS(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
@ -97,21 +109,21 @@ void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
template<typename T>
template <typename T>
class InternalScopedBuffer {
 public:
  explicit InternalScopedBuffer(uptr cnt) {
    cnt_ = cnt;
    ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() {
    UnmapOrDie(ptr_, cnt_ * sizeof(T));
    ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
  T &operator[](uptr i) { return ptr_[i]; }
  T *data() { return ptr_; }
  uptr size() { return cnt_ * sizeof(T); }
@ -119,9 +131,11 @@ class InternalScopedBuffer {
 private:
  T *ptr_;
  uptr cnt_;
  // Disallow evil constructors.
  InternalScopedBuffer(const InternalScopedBuffer&);
  void operator=(const InternalScopedBuffer&);
  // Disallow copies and moves.
  InternalScopedBuffer(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
  InternalScopedBuffer(InternalScopedBuffer &&) = delete;
  InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};

class InternalScopedString : public InternalScopedBuffer<char> {
@ -160,6 +174,7 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
@ -271,10 +286,27 @@ const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirected to the file. The files will always be closed
// in parent process even in case of an error.
// The child process will close all fds after STDERR_FILENO
// before passing control to a program.
pid_t StartSubprocess(const char *filename, const char *const argv[],
                      fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
                      fd_t stderr_fd = kInvalidFd);
// Checks if specified process is still running
bool IsProcessRunning(pid_t pid);
// Waits for the process to finish and returns its exit code.
// Returns -1 in case of an error.
int WaitForProcess(pid_t pid);

u32 GetUid();
void ReExec();
char **GetArgv();
void PrintCmdline();
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
@ -299,6 +331,7 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

// Exit
@ -307,7 +340,8 @@ void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err);
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
@ -339,9 +373,15 @@ void SetCheckFailedCallback(CheckFailedCallbackType callback);
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Callback to be called when we want to try releasing unused allocator memory
// back to the OS.
typedef void (*AllocatorReleaseToOSCallback)();
// The callback should be registered once at the tool init time.
void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum);
bool IsHandledDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
@ -357,7 +397,7 @@ void ReportErrorSummary(const char *error_message);
// error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, StackTrace *trace);
void ReportErrorSummary(const char *error_type, const StackTrace *trace);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@ -414,13 +454,13 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  CHECK(IsPowerOfTwo(boundary));
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

@ -487,7 +527,7 @@ class InternalMmapVectorNoCtor {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Resize(new_capacity);
    }
    data_[size_++] = element;
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
@ -513,6 +553,19 @@ class InternalMmapVectorNoCtor {
  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

 private:
  void Resize(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
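The new begin()/end() accessors are what make range-based for loops over these containers work; a minimal usage sketch, assuming the surrounding sanitizer runtime (not standalone):

// Sketch only: InternalMmapVector and Printf are sanitizer-internal.
InternalMmapVector<uptr> offsets(/*initial_capacity*/ 4);
offsets.push_back(16);
offsets.push_back(32);
for (uptr off : offsets)  // compiles because of the begin()/end() above
  Printf("offset: %zd\n", off);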
@ -619,8 +672,7 @@ class LoadedModule {
        : next(nullptr), beg(beg), end(end), executable(executable) {}
  };

  typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
  Iterator ranges() const { return Iterator(&ranges_); }
  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
@ -628,13 +680,33 @@ class LoadedModule {
  IntrusiveList<AddressRange> ranges_;
};

// OS-dependent function that fills array with descriptions of at most
// "max_modules" currently loaded modules. Returns the number of
// initialized modules. If filter is nonzero, ignores modules for which
// filter(full_name) is false.
typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter);
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : modules_(kInitialCapacity) {}
  ~ListOfModules() { clear(); }
  void init();
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }

  InternalMmapVector<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
};

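A usage sketch of ListOfModules as the replacement for GetListOfModules callers, mirroring how this merge itself uses it in GenerateHtmlReport further below; some_pc is a hypothetical address:

// Sketch only: enumerate loaded modules via the new class.
ListOfModules modules;
modules.init();  // the OS-specific implementation fills modules_
for (const LoadedModule &module : modules) {
  if (module.containsAddress(some_pc))  // some_pc: hypothetical address
    Printf("PC belongs to %s\n", module.full_name());
}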
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
@ -646,22 +718,34 @@ enum AndroidApiLevel {
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void WriteToSyslog(const char *buffer);
#else
INLINE void AndroidLogInit() {}
INLINE void WriteToSyslog(const char *buffer) {}
#endif

#if SANITIZER_ANDROID
void GetExtraActivationFlags(char *buf, uptr size);
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
@ -686,7 +770,7 @@ void MaybeStartBackgroudThread();
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if _MSC_VER && !defined(__clang__)
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
@ -699,17 +783,63 @@ struct SignalContext {
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
      context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
  }
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
                bool is_memory_access, WriteFlag write_flag)
      : context(context),
        addr(addr),
        pc(pc),
        sp(sp),
        bp(bp),
        is_memory_access(is_memory_access),
        write_flag(write_flag) {}

  // Creates signal context in a platform-specific manner.
  static SignalContext Create(void *siginfo, void *context);

  // Returns true if the "context" indicates a memory write.
  static WriteFlag GetWriteFlag(void *context);
};

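A sketch of how a deadly-signal handler might consume the extended context; the handler name and report wording are illustrative, assuming the SignalHandlerType signature declared above:

// Sketch only: matches the SignalHandlerType signature declared earlier.
static void OnDeadlySignal(int signo, void *siginfo, void *context) {
  SignalContext sig = SignalContext::Create(siginfo, context);
  if (sig.is_memory_access && sig.write_flag == SignalContext::WRITE)
    Printf("invalid write at %p (pc %p)\n",
           (void *)sig.addr, (void *)sig.pc);
}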
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

} // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
@ -717,9 +847,4 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
  return alloc.Allocate(size);
}

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

#endif // SANITIZER_COMMON_H
File diff suppressed because it is too large
@ -51,25 +51,9 @@ static void ioctl_table_fill() {
  _(FIONBIO, READ, sizeof(int));
  _(FIONCLEX, NONE, 0);
  _(FIOSETOWN, READ, sizeof(int));
  _(SIOCADDMULTI, READ, struct_ifreq_sz);
  _(SIOCATMARK, WRITE, sizeof(int));
  _(SIOCDELMULTI, READ, struct_ifreq_sz);
  _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFCONF, CUSTOM, 0);
  _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
  _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
  _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
  _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
  _(SIOCGPGRP, WRITE, sizeof(int));
  _(SIOCSIFADDR, READ, struct_ifreq_sz);
  _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
  _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
  _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
  _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
  _(SIOCSIFMTU, READ, struct_ifreq_sz);
  _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
  _(SIOCSPGRP, READ, sizeof(int));
  _(TIOCCONS, NONE, 0);
  _(TIOCEXCL, NONE, 0);
@ -90,6 +74,25 @@ static void ioctl_table_fill() {
  _(TIOCSTI, READ, sizeof(char));
  _(TIOCSWINSZ, READ, struct_winsize_sz);

#if !SANITIZER_IOS
  _(SIOCADDMULTI, READ, struct_ifreq_sz);
  _(SIOCDELMULTI, READ, struct_ifreq_sz);
  _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
  _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
  _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
  _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
  _(SIOCSIFADDR, READ, struct_ifreq_sz);
  _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
  _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
  _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
  _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
  _(SIOCSIFMTU, READ, struct_ifreq_sz);
  _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
#endif

#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
  _(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
  _(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
@ -578,7 +581,8 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
    return;
  if (request == IOCTL_SIOCGIFCONF) {
    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
    COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
                                  sizeof(ifc->ifc_len));
  }
}

@ -10,6 +10,8 @@
//===----------------------------------------------------------------------===//

#include "sanitizer_common.h"

#include "sanitizer_allocator_interface.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
@ -43,7 +45,8 @@ void SetSandboxingCallback(void (*f)()) {
  sandboxing_callback = f;
}

void ReportErrorSummary(const char *error_type, StackTrace *stack) {
void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
#if !SANITIZER_GO
  if (!common_flags()->print_summary)
    return;
  if (stack->size == 0) {
@ -56,6 +59,7 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
  ReportErrorSummary(error_type, frame->info);
  frame->ClearAll();
#endif
}

static void (*SoftRssLimitExceededCallback)(bool exceeded);
@ -64,12 +68,22 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
  SoftRssLimitExceededCallback = Callback;
}

static AllocatorReleaseToOSCallback ReleseCallback;
void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) {
  CHECK_EQ(ReleseCallback, nullptr);
  ReleseCallback = Callback;
}

#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
  uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  bool heap_profile = common_flags()->heap_profile;
  bool allocator_release_to_os = common_flags()->allocator_release_to_os;
  uptr prev_reported_rss = 0;
  uptr prev_reported_stack_depot_size = 0;
  bool reached_soft_rss_limit = false;
  uptr rss_during_last_reported_profile = 0;
  while (true) {
    SleepForMillis(100);
    uptr current_rss_mb = GetRSS() >> 20;
@ -111,14 +125,43 @@ void BackgroundThread(void *arg) {
        SoftRssLimitExceededCallback(false);
    }
  }
    if (allocator_release_to_os && ReleseCallback) ReleseCallback();
    if (heap_profile &&
        current_rss_mb > rss_during_last_reported_profile * 1.1) {
      Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
      __sanitizer_print_memory_profile(90);
      rss_during_last_reported_profile = current_rss_mb;
    }
  }
}
#endif

void WriteToSyslog(const char *msg) {
  InternalScopedString msg_copy(kErrorMessageBufferSize);
  msg_copy.append("%s", msg);
  char *p = msg_copy.data();
  char *q;

  // Print one line at a time.
  // syslog, at least on Android, has an implicit message length limit.
  do {
    q = internal_strchr(p, '\n');
    if (q)
      *q = '\0';
    WriteOneLineToSyslog(p);
    if (q)
      p = q + 1;
  } while (q);
}

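For illustration, a hypothetical call (message text invented); each '\n'-terminated line becomes one WriteOneLineToSyslog() call, which matters on Android where a single syslog message is length-limited:

// Sketch only (message text invented):
WriteToSyslog("ERROR: AddressSanitizer: heap-use-after-free\n"
              "READ of size 4 at 0x602000000010\n");
// Produces two WriteOneLineToSyslog() calls, one per report line.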
void MaybeStartBackgroudThread() {
#if SANITIZER_LINUX // Need to implement/test on other platforms.
#if SANITIZER_LINUX && \
    !SANITIZER_GO  // Need to implement/test on other platforms.
  // Start the background thread if one of the rss limits is given.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb) return;
      !common_flags()->soft_rss_limit_mb &&
      !common_flags()->allocator_release_to_os &&
      !common_flags()->heap_profile) return;
  if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
  internal_start_thread(BackgroundThread, nullptr);
#endif
@ -128,7 +171,7 @@ void MaybeStartBackgroudThread() {

void NOINLINE
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
  PrepareForSandboxing(args);
  if (sandboxing_callback)
    sandboxing_callback();
  __sanitizer::PrepareForSandboxing(args);
  if (__sanitizer::sandboxing_callback)
    __sanitizer::sandboxing_callback();
}

@ -1235,17 +1235,15 @@ POST_SYSCALL(fcntl64)(long res, long fd, long cmd, long arg) {}
PRE_SYSCALL(pipe)(void *fildes) {}

POST_SYSCALL(pipe)(long res, void *fildes) {
  if (res >= 0) {
    if (fildes) POST_WRITE(fildes, sizeof(int));
  }
  if (res >= 0)
    if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
}

PRE_SYSCALL(pipe2)(void *fildes, long flags) {}

POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
  if (res >= 0) {
    if (fildes) POST_WRITE(fildes, sizeof(int));
  }
  if (res >= 0)
    if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
}

PRE_SYSCALL(dup)(long fildes) {}
@ -1878,13 +1876,11 @@ PRE_SYSCALL(socket)(long arg0, long arg1, long arg2) {}

POST_SYSCALL(socket)(long res, long arg0, long arg1, long arg2) {}

PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, void *arg3) {}
PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}

POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2,
                         void *arg3) {
  if (res >= 0) {
    if (arg3) POST_WRITE(arg3, sizeof(int));
  }
POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
  if (res >= 0)
    if (sv) POST_WRITE(sv, sizeof(int) * 2);
}

PRE_SYSCALL(socketcall)(long call, void *args) {}
@ -2299,7 +2295,7 @@ POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && \
    (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
     defined(__powerpc64__) || defined(__aarch64__))
     defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
  if (data) {
    if (request == ptrace_setregs) {
      PRE_READ((void *)data, struct_user_regs_struct_sz);
@ -2320,7 +2316,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && \
    (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
     defined(__powerpc64__) || defined(__aarch64__))
     defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
  if (res >= 0 && data) {
    // Note that this is different from the interceptor in
    // sanitizer_common_interceptors.inc.
@ -2842,6 +2838,40 @@ PRE_SYSCALL(vfork)() {
POST_SYSCALL(vfork)(long res) {
  COMMON_SYSCALL_POST_FORK(res);
}

PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
                       __sanitizer_kernel_sigaction_t *oldact) {
  if (act) {
    PRE_READ(&act->sigaction, sizeof(act->sigaction));
    PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
    PRE_READ(&act->sa_mask, sizeof(act->sa_mask));
  }
}

POST_SYSCALL(sigaction)(long res, long signum,
                        const __sanitizer_kernel_sigaction_t *act,
                        __sanitizer_kernel_sigaction_t *oldact) {
  if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
}

PRE_SYSCALL(rt_sigaction)(long signum,
                          const __sanitizer_kernel_sigaction_t *act,
                          __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
  if (act) {
    PRE_READ(&act->sigaction, sizeof(act->sigaction));
    PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
    PRE_READ(&act->sa_mask, sz);
  }
}

POST_SYSCALL(rt_sigaction)(long res, long signum,
                           const __sanitizer_kernel_sigaction_t *act,
                           __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
  if (res >= 0 && oldact) {
    SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
    POST_WRITE(oldact, oldact_sz);
  }
}
} // extern "C"

#undef PRE_SYSCALL

@ -45,8 +45,12 @@
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"

using namespace __sanitizer;

static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;

static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

@ -94,7 +98,7 @@ class CoverageData {
  void DumpAll();

  ALWAYS_INLINE
  void TraceBasicBlock(s32 *id);
  void TraceBasicBlock(u32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *module_name,
@ -105,17 +109,23 @@ class CoverageData {
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size();
  uptr size() const;

 private:
  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end; // elements [beg,end) in pc_array.
  };

  void DirectOpen();
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
  void GetRangeOffsets(const NamedPcRange& r, Symbolizer* s,
                       InternalMmapVector<uptr>* offsets) const;

  // Maximal size pc array may ever grow.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
      1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
      1 << 27);
  static const uptr kPcArrayMaxSize =
      FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
  // The amount the file mapping for the pc array is grown by.
  static const uptr kPcArrayMmapSize = 64 * 1024;

@ -134,11 +144,6 @@ class CoverageData {
  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32*> guard_array_vec;

  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end; // elements [beg,end) in pc_array.
  };

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
@ -510,7 +515,7 @@ uptr *CoverageData::data() {
  return pc_array;
}

uptr CoverageData::size() {
uptr CoverageData::size() const {
  return atomic_load(&pc_array_index, memory_order_relaxed);
}

@ -680,11 +685,11 @@ void CoverageData::DumpCallerCalleePairs() {
// it once and then cache in the provided 'cache' storage.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(s32 *id) {
void CoverageData::TraceBasicBlock(u32 *id) {
  // Will trap here if
  // 1. coverage is not enabled at run-time.
  // 2. The array tr_event_array is full.
  *tr_event_pointer = static_cast<u32>(*id - 1);
  *tr_event_pointer = *id - 1;
  tr_event_pointer++;
}

@ -740,41 +745,96 @@ void CoverageData::DumpAsBitSet() {
  }
}


void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym,
                                   InternalMmapVector<uptr>* offsets) const {
  offsets->clear();
  for (uptr i = 0; i < kNumWordsForMagic; i++)
    offsets->push_back(0);
  CHECK(r.copied_module_name);
  CHECK_LE(r.beg, r.end);
  CHECK_LE(r.end, size());
  for (uptr i = r.beg; i < r.end; i++) {
    uptr pc = UnbundlePc(pc_array[i]);
    uptr counter = UnbundleCounter(pc_array[i]);
    if (!pc) continue; // Not visited.
    uptr offset = 0;
    sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
    offsets->push_back(BundlePcAndCounter(offset, counter));
  }

  CHECK_GE(offsets->size(), kNumWordsForMagic);
  SortArray(offsets->data(), offsets->size());
  for (uptr i = 0; i < offsets->size(); i++)
    (*offsets)[i] = UnbundlePc((*offsets)[i]);
}

|
||||
if (!common_flags()->html_cov_report) {
|
||||
return;
|
||||
}
|
||||
char *sancov_path = FindPathToBinary(common_flags()->sancov_path);
|
||||
if (sancov_path == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
InternalMmapVector<char *> sancov_argv(cov_files.size() * 2 + 3);
|
||||
sancov_argv.push_back(sancov_path);
|
||||
sancov_argv.push_back(internal_strdup("-html-report"));
|
||||
auto argv_deleter = at_scope_exit([&] {
|
||||
for (uptr i = 0; i < sancov_argv.size(); ++i) {
|
||||
InternalFree(sancov_argv[i]);
|
||||
}
|
||||
});
|
||||
|
||||
for (const auto &cov_file : cov_files) {
|
||||
sancov_argv.push_back(internal_strdup(cov_file));
|
||||
}
|
||||
|
||||
{
|
||||
ListOfModules modules;
|
||||
modules.init();
|
||||
for (const LoadedModule &module : modules) {
|
||||
sancov_argv.push_back(internal_strdup(module.full_name()));
|
||||
}
|
||||
}
|
||||
|
||||
InternalScopedString report_path(kMaxPathLength);
|
||||
fd_t report_fd =
|
||||
CovOpenFile(&report_path, false /* packed */, GetProcessName(), "html");
|
||||
int pid = StartSubprocess(sancov_argv[0], sancov_argv.data(),
|
||||
kInvalidFd /* stdin */, report_fd /* std_out */);
|
||||
if (pid > 0) {
|
||||
int result = WaitForProcess(pid);
|
||||
if (result == 0)
|
||||
Printf("coverage report generated to %s\n", report_path.data());
|
||||
}
|
||||
}
 
 void CoverageData::DumpOffsets() {
   auto sym = Symbolizer::GetOrInit();
   if (!common_flags()->coverage_pcs) return;
   CHECK_NE(sym, nullptr);
   InternalMmapVector<uptr> offsets(0);
   InternalScopedString path(kMaxPathLength);
-  for (uptr m = 0; m < module_name_vec.size(); m++) {
-    offsets.clear();
-    uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
-    for (uptr i = 0; i < num_words_for_magic; i++)
-      offsets.push_back(0);
-    auto r = module_name_vec[m];
-    CHECK(r.copied_module_name);
-    CHECK_LE(r.beg, r.end);
-    CHECK_LE(r.end, size());
-    for (uptr i = r.beg; i < r.end; i++) {
-      uptr pc = UnbundlePc(pc_array[i]);
-      uptr counter = UnbundleCounter(pc_array[i]);
-      if (!pc) continue; // Not visited.
-      uptr offset = 0;
-      sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
-      offsets.push_back(BundlePcAndCounter(offset, counter));
+
+  InternalMmapVector<char *> cov_files(module_name_vec.size());
+  auto cov_files_deleter = at_scope_exit([&] {
+    for (uptr i = 0; i < cov_files.size(); ++i) {
+      InternalFree(cov_files[i]);
     }
+  });
 
-    CHECK_GE(offsets.size(), num_words_for_magic);
-    SortArray(offsets.data(), offsets.size());
-    for (uptr i = 0; i < offsets.size(); i++)
-      offsets[i] = UnbundlePc(offsets[i]);
+  for (uptr m = 0; m < module_name_vec.size(); m++) {
+    auto r = module_name_vec[m];
+    GetRangeOffsets(r, sym, &offsets);
 
-    uptr num_offsets = offsets.size() - num_words_for_magic;
+    uptr num_offsets = offsets.size() - kNumWordsForMagic;
     u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
     CHECK_EQ(*magic_p, 0ULL);
     // FIXME: we may want to write 32-bit offsets even in 64-mode
     // if all the offsets are small enough.
-    *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
+    *magic_p = kMagic;
 
     const char *module_name = StripModuleName(r.copied_module_name);
     if (cov_sandboxed) {
@@ -789,11 +849,14 @@ void CoverageData::DumpOffsets() {
       if (fd == kInvalidFd) continue;
       WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
       CloseFile(fd);
+      cov_files.push_back(internal_strdup(path.data()));
       VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
     }
   }
   if (cov_fd != kInvalidFd)
     CloseFile(cov_fd);
+
+  GenerateHtmlReport(cov_files);
 }
 
 void CoverageData::DumpAll() {
@@ -918,11 +981,13 @@ uptr __sanitizer_get_total_unique_caller_callee_pairs() {
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_func_enter(s32 *id) {
+void __sanitizer_cov_trace_func_enter(u32 *id) {
+  __sanitizer_cov_with_check(id);
   coverage_data.TraceBasicBlock(id);
 }
 SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_basic_block(s32 *id) {
+void __sanitizer_cov_trace_basic_block(u32 *id) {
+  __sanitizer_cov_with_check(id);
   coverage_data.TraceBasicBlock(id);
 }
 SANITIZER_INTERFACE_ATTRIBUTE
@@ -949,8 +1014,30 @@ uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
   return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
 }
+// Default empty implementations (weak). Users should redefine them.
+#if !SANITIZER_WINDOWS // weak does not work on Windows.
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
 void __sanitizer_cov_trace_cmp() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp1() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp2() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp8() {}
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
 void __sanitizer_cov_trace_switch() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_gep() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_guard() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_indir() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_guard_init() {}
+#endif // !SANITIZER_WINDOWS
 } // extern "C"
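
These weak stubs let the runtime link even when nothing consumes the trace hooks; per the comment in the hunk, a user program (typically a fuzzer) supplies strong definitions that take precedence at link time. A minimal sketch matching the no-argument signatures declared here:

    // Sketch: a strong (non-weak) definition overrides the weak default above.
    extern "C" void __sanitizer_cov_trace_cmp() {
      // custom handling of comparison-instrumentation events goes here
    }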
 
@@ -70,26 +70,21 @@ void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
   InternalScopedString text(kMaxTextSize);
 
   {
-    InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
-    CHECK(modules.data());
-    int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
-                                     /* filter */ nullptr);
-
     text.append("%d\n", sizeof(uptr) * 8);
-    for (int i = 0; i < n_modules; ++i) {
-      const char *module_name = StripModuleName(modules[i].full_name());
-      uptr base = modules[i].base_address();
-      for (auto iter = modules[i].ranges(); iter.hasNext();) {
-        const auto *range = iter.next();
-        if (range->executable) {
-          uptr start = range->beg;
-          uptr end = range->end;
+    ListOfModules modules;
+    modules.init();
+    for (const LoadedModule &module : modules) {
+      const char *module_name = StripModuleName(module.full_name());
+      uptr base = module.base_address();
+      for (const auto &range : module.ranges()) {
+        if (range.executable) {
+          uptr start = range.beg;
+          uptr end = range.end;
           text.append("%zx %zx %zx %s\n", start, end, base, module_name);
           if (caller_pc && caller_pc >= start && caller_pc < end)
             cached_mapping.SetModuleRange(start, end);
         }
       }
-      modules[i].clear();
     }
   }
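
Both coverage hunks above switch from the old fixed-capacity GetListOfModules() buffer to ListOfModules, which manages its own storage (hence the dropped per-module clear() call) and supports range-for. As a standalone sketch of the idiom:

    // ListOfModules idiom introduced by this merge (as used above).
    ListOfModules modules;
    modules.init();  // snapshot the currently loaded modules
    for (const LoadedModule &module : modules) {
      // inspect module.full_name(), module.base_address(), module.ranges()
    }
    // storage is reclaimed when 'modules' goes out of scope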
 
@@ -117,11 +117,16 @@ void DD::MutexBeforeLock(DDCallback *cb,
 
 void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
   DDLogicalThread *lt = cb->lt;
-  uptr path[10];
+  uptr path[20];
   uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
-  CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
+  if (len == 0U) {
+    // A cycle of 20+ locks? Well, that's a bit odd...
+    Printf("WARNING: too long mutex cycle found\n");
+    return;
+  }
   CHECK_EQ(m->id, path[0]);
   lt->report_pending = true;
+  len = Min<uptr>(len, DDReport::kMaxLoopSize);
   DDReport *rep = &lt->rep;
   rep->n = len;
   for (uptr i = 0; i < len; i++) {
 
@@ -49,7 +49,7 @@ struct DDFlags {
 };
 
 struct DDReport {
-  enum { kMaxLoopSize = 8 };
+  enum { kMaxLoopSize = 20 };
   int n; // number of entries in loop
   struct {
     u64 thr_ctx; // user thread context
 
@@ -28,11 +28,6 @@ struct FlagDescription {
 
 IntrusiveList<FlagDescription> flag_descriptions;
 
-// If set, the tool will install its own SEGV signal handler by default.
-#ifndef SANITIZER_NEEDS_SEGV
-# define SANITIZER_NEEDS_SEGV 1
-#endif
-
 void CommonFlags::SetDefaults() {
 #define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
 #include "sanitizer_flags.inc"
@@ -43,17 +38,44 @@ void CommonFlags::CopyFrom(const CommonFlags &other) {
   internal_memcpy(this, &other, sizeof(*this));
 }
 
-// Copy the string from "s" to "out", replacing "%b" with the binary basename.
-static void SubstituteBinaryName(const char *s, char *out, uptr out_size) {
+// Copy the string from "s" to "out", making the following substitutions:
+// %b = binary basename
+// %p = pid
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
   char *out_end = out + out_size;
   while (*s && out < out_end - 1) {
-    if (s[0] != '%' || s[1] != 'b') { *out++ = *s++; continue; }
-    const char *base = GetProcessName();
-    CHECK(base);
-    while (*base && out < out_end - 1)
-      *out++ = *base++;
-    s += 2; // skip "%b"
+    if (s[0] != '%') {
+      *out++ = *s++;
+      continue;
+    }
+    switch (s[1]) {
+      case 'b': {
+        const char *base = GetProcessName();
+        CHECK(base);
+        while (*base && out < out_end - 1)
+          *out++ = *base++;
+        s += 2; // skip "%b"
+        break;
+      }
+      case 'p': {
+        int pid = internal_getpid();
+        char buf[32];
+        char *buf_pos = buf + 32;
+        do {
+          *--buf_pos = (pid % 10) + '0';
+          pid /= 10;
+        } while (pid);
+        while (buf_pos < buf + 32 && out < out_end - 1)
+          *out++ = *buf_pos++;
+        s += 2; // skip "%p"
+        break;
+      }
+      default:
+        *out++ = *s++;
+        break;
+    }
   }
   CHECK(out < out_end - 1);
   *out = '\0';
 }
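
To illustrate the new substitutions (values hypothetical): for a process whose basename is "app" running as pid 1234,

    char buf[64];  // sketch; the real caller below uses an mmap'ed kMaxPathLength buffer
    SubstituteForFlagValue("coverage.%b.%p", buf, sizeof(buf));
    // buf now holds "coverage.app.1234"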
 
@@ -67,7 +89,7 @@ class FlagHandlerInclude : public FlagHandlerBase {
   bool Parse(const char *value) final {
     if (internal_strchr(value, '%')) {
       char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
-      SubstituteBinaryName(value, buf, kMaxPathLength);
+      SubstituteForFlagValue(value, buf, kMaxPathLength);
       bool res = parser_->ParseFile(buf, ignore_missing_);
       UnmapOrDie(buf, kMaxPathLength);
       return res;
 
@@ -97,4 +119,10 @@ void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
   RegisterIncludeFlags(parser, cf);
 }
 
+void InitializeCommonFlags(CommonFlags *cf) {
+  // need to record coverage to generate coverage report.
+  cf->coverage |= cf->html_cov_report;
+  SetVerbosity(cf->verbosity);
+}
+
 } // namespace __sanitizer
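
InitializeCommonFlags is intended to run once all flags are parsed (see the comment added to the header in the next hunk). A minimal sketch of the expected order, with a hypothetical options string:

    FlagParser parser;
    RegisterCommonFlags(&parser);
    parser.ParseString("verbosity=1 html_cov_report=1");  // hypothetical input
    InitializeCommonFlags();  // folds html_cov_report into coverage, applies verbosity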
 
@@ -44,10 +44,17 @@ inline void OverrideCommonFlags(const CommonFlags &cf) {
   common_flags_dont_use.CopyFrom(cf);
 }
 
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size);
+
 class FlagParser;
 void RegisterCommonFlags(FlagParser *parser,
                          CommonFlags *cf = &common_flags_dont_use);
 void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
+
+// Should be called after parsing all flags. Sets up common flag values
+// and perform initializations common to all sanitizers (e.g. setting
+// verbosity).
+void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);
 } // namespace __sanitizer
 
 #endif // SANITIZER_FLAGS_H