backport: All source files: Merge from upstream 345033.
Merge from upstream 345033.

2018-10-31  Martin Liska  <mliska@suse.cz>

	* All source files: Merge from upstream 345033.

From-SVN: r265665
commit eac9753122
parent 95fba530b6
@@ -1,3 +1,7 @@
+2018-10-31  Martin Liska  <mliska@suse.cz>
+
+	* All source files: Merge from upstream 345033.
+
 2018-10-31  Martin Liska  <mliska@suse.cz>
 
 	* HOWTO_MERGE: Enhance documentation.
@@ -1,4 +1,4 @@
-315899
+345033
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -14,8 +14,10 @@
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {
@@ -108,8 +110,9 @@ void AsanDeactivate() {
  AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
  disabled.quarantine_size_mb = 0;
  disabled.thread_local_quarantine_size_kb = 0;
- disabled.min_redzone = 16;  // Redzone must be at least 16 bytes long.
- disabled.max_redzone = 16;
+ // Redzone must be at least Max(16, granularity) bytes long.
+ disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
+ disabled.max_redzone = disabled.min_redzone;
  disabled.alloc_dealloc_mismatch = false;
  disabled.may_return_null = true;
  ReInitializeAllocator(disabled);
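As a rough aside on the hunk above: the deactivated allocator's minimum redzone now follows the shadow granularity instead of a hard-coded 16 bytes. A minimal standalone sketch, assuming the usual relation SHADOW_GRANULARITY == 1 << SHADOW_SCALE (that definition is not part of this diff):

// Illustrative only: how Max(16, SHADOW_GRANULARITY) behaves for typical
// shadow scales. Assumes SHADOW_GRANULARITY == 1 << SHADOW_SCALE.
#include <algorithm>
#include <cstdio>

int main() {
  const int scales[] = {3, 4, 5};
  for (int shadow_scale : scales) {
    int granularity = 1 << shadow_scale;          // 8, 16, 32 bytes
    int min_redzone = std::max(16, granularity);  // 16, 16, 32 bytes
    std::printf("scale=%d granularity=%d min_redzone=%d\n", shadow_scale,
                granularity, min_redzone);
  }
  return 0;
}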
@@ -82,7 +82,10 @@ struct ChunkHeader {
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
- u32 user_requested_size;
+ u32 user_requested_size : 29;
+ // align < 8 -> 0
+ // else -> log2(min(align, 512)) - 2
+ u32 user_requested_alignment_log : 3;
  u32 alloc_context_id;
};

@@ -129,8 +132,9 @@ struct AsanChunk: ChunkBase {
};

struct QuarantineCallback {
-  explicit QuarantineCallback(AllocatorCache *cache)
-      : cache_(cache) {
+  QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
+      : cache_(cache),
+        stack_(stack) {
  }

  void Recycle(AsanChunk *m) {
@@ -163,7 +167,7 @@ struct QuarantineCallback {
    void *res = get_allocator().Allocate(cache_, size, 1);
    // TODO(alekseys): Consider making quarantine OOM-friendly.
    if (UNLIKELY(!res))
-     return DieOnFailure::OnOOM();
+     ReportOutOfMemory(size, stack_);
    return res;
  }

@@ -171,7 +175,9 @@ struct QuarantineCallback {
    get_allocator().Deallocate(cache_, p);
  }

-  AllocatorCache *cache_;
+ private:
+  AllocatorCache* const cache_;
+  BufferedStackTrace* const stack_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
@@ -269,9 +275,9 @@ struct Allocator {
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

- void Initialize(const AllocatorOptions &options) {
+ void InitLinkerInitialized(const AllocatorOptions &options) {
    SetAllocatorMayReturnNull(options.may_return_null);
-   allocator.Init(options.release_to_os_interval_ms);
+   allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }

@@ -349,6 +355,20 @@ struct Allocator {
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }

+ static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
+   if (user_requested_alignment < 8)
+     return 0;
+   if (user_requested_alignment > 512)
+     user_requested_alignment = 512;
+   return Log2(user_requested_alignment) - 2;
+ }
+
+ static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
+   if (user_requested_alignment_log == 0)
+     return 0;
+   return 1LL << (user_requested_alignment_log + 2);
+ }
+
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
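A standalone sketch of the round-trip behaviour of the two helpers added above, re-implemented here with an explicit loop instead of the runtime's Log2 so it compiles on its own; the function names below are illustrative:

#include <cassert>
#include <cstdint>

// Mirrors ComputeUserRequestedAlignmentLog: < 8 -> 0, otherwise
// log2(min(align, 512)) - 2, which fits the 3-bit field in ChunkHeader.
static unsigned AlignmentLog(uint64_t align) {
  if (align < 8) return 0;
  if (align > 512) align = 512;
  unsigned log = 0;
  while ((1ULL << log) < align) ++log;  // log2 for power-of-two alignments
  return log - 2;
}

// Mirrors ComputeUserAlignment: 0 means "not recorded", else 1 << (log + 2).
static uint64_t AlignmentFromLog(unsigned log) {
  return log == 0 ? 0 : 1ULL << (log + 2);
}

int main() {
  assert(AlignmentLog(4) == 0);                         // small alignments drop to 0
  assert(AlignmentFromLog(AlignmentLog(64)) == 64);     // exact round-trip
  assert(AlignmentFromLog(AlignmentLog(4096)) == 512);  // capped at 512
  return 0;
}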
@ -378,11 +398,16 @@ struct Allocator {
|
||||
AllocType alloc_type, bool can_fill) {
|
||||
if (UNLIKELY(!asan_inited))
|
||||
AsanInitFromRtl();
|
||||
if (RssLimitExceeded())
|
||||
return AsanAllocator::FailureHandler::OnOOM();
|
||||
if (RssLimitExceeded()) {
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportRssLimitExceeded(stack);
|
||||
}
|
||||
Flags &fl = *flags();
|
||||
CHECK(stack);
|
||||
const uptr min_alignment = SHADOW_GRANULARITY;
|
||||
const uptr user_requested_alignment_log =
|
||||
ComputeUserRequestedAlignmentLog(alignment);
|
||||
if (alignment < min_alignment)
|
||||
alignment = min_alignment;
|
||||
if (size == 0) {
|
||||
@ -410,9 +435,13 @@ struct Allocator {
|
||||
}
|
||||
CHECK(IsAligned(needed_size, min_alignment));
|
||||
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
|
||||
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
|
||||
(void*)size);
|
||||
return AsanAllocator::FailureHandler::OnBadRequest();
|
||||
if (AllocatorMayReturnNull()) {
|
||||
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
|
||||
(void*)size);
|
||||
return nullptr;
|
||||
}
|
||||
ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
|
||||
stack);
|
||||
}
|
||||
|
||||
AsanThread *t = GetCurrentThread();
|
||||
@ -425,8 +454,12 @@ struct Allocator {
|
||||
AllocatorCache *cache = &fallback_allocator_cache;
|
||||
allocated = allocator.Allocate(cache, needed_size, 8);
|
||||
}
|
||||
if (!allocated)
|
||||
return nullptr;
|
||||
if (UNLIKELY(!allocated)) {
|
||||
SetAllocatorOutOfMemory();
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportOutOfMemory(size, stack);
|
||||
}
|
||||
|
||||
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
|
||||
// Heap poisoning is enabled, but the allocator provides an unpoisoned
|
||||
@ -470,6 +503,7 @@ struct Allocator {
|
||||
meta[0] = size;
|
||||
meta[1] = chunk_beg;
|
||||
}
|
||||
m->user_requested_alignment_log = user_requested_alignment_log;
|
||||
|
||||
m->alloc_context_id = StackDepotPut(*stack);
|
||||
|
||||
@ -561,18 +595,18 @@ struct Allocator {
|
||||
if (t) {
|
||||
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
|
||||
AllocatorCache *ac = GetAllocatorCache(ms);
|
||||
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
|
||||
m->UsedSize());
|
||||
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
|
||||
m->UsedSize());
|
||||
} else {
|
||||
SpinMutexLock l(&fallback_mutex);
|
||||
AllocatorCache *ac = &fallback_allocator_cache;
|
||||
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
|
||||
m->UsedSize());
|
||||
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
|
||||
m, m->UsedSize());
|
||||
}
|
||||
}
|
||||
|
||||
void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
|
||||
AllocType alloc_type) {
|
||||
void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
|
||||
BufferedStackTrace *stack, AllocType alloc_type) {
|
||||
uptr p = reinterpret_cast<uptr>(ptr);
|
||||
if (p == 0) return;
|
||||
|
||||
@ -599,11 +633,14 @@ struct Allocator {
|
||||
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
|
||||
(AllocType)alloc_type);
|
||||
}
|
||||
}
|
||||
|
||||
if (delete_size && flags()->new_delete_type_mismatch &&
|
||||
delete_size != m->UsedSize()) {
|
||||
ReportNewDeleteSizeMismatch(p, delete_size, stack);
|
||||
} else {
|
||||
if (flags()->new_delete_type_mismatch &&
|
||||
(alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
|
||||
((delete_size && delete_size != m->UsedSize()) ||
|
||||
ComputeUserRequestedAlignmentLog(delete_alignment) !=
|
||||
m->user_requested_alignment_log)) {
|
||||
ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
|
||||
}
|
||||
}
|
||||
|
||||
QuarantineChunk(m, ptr, stack);
|
||||
@ -629,14 +666,17 @@ struct Allocator {
|
||||
// If realloc() races with free(), we may start copying freed memory.
|
||||
// However, we will report racy double-free later anyway.
|
||||
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
|
||||
Deallocate(old_ptr, 0, stack, FROM_MALLOC);
|
||||
Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
|
||||
}
|
||||
return new_ptr;
|
||||
}
|
||||
|
||||
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
|
||||
if (CheckForCallocOverflow(size, nmemb))
|
||||
return AsanAllocator::FailureHandler::OnBadRequest();
|
||||
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportCallocOverflow(nmemb, size, stack);
|
||||
}
|
||||
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
|
||||
// If the memory comes from the secondary allocator no need to clear it
|
||||
// as it comes directly from mmap.
|
||||
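The Calloc hunk above now reports calloc-parameter overflow through ReportCallocOverflow instead of the old FailureHandler callback. A standalone sketch of what such an overflow check amounts to (CheckForCallocOverflow itself is not shown in this diff, so its exact implementation is an assumption):

#include <cstdint>
#include <cstdio>

// A calloc(count, size) request overflows when count * size does not fit in
// the size type; dividing avoids the wraparound the multiplication would cause.
static bool CallocWouldOverflow(uint64_t count, uint64_t size) {
  if (count == 0 || size == 0) return false;
  return count > UINT64_MAX / size;
}

int main() {
  std::printf("%d\n", CallocWouldOverflow(1u << 20, 1u << 20));  // 0: fits
  std::printf("%d\n", CallocWouldOverflow(UINT64_MAX / 2, 3));   // 1: overflows
  return 0;
}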
@ -652,9 +692,9 @@ struct Allocator {
|
||||
ReportFreeNotMalloced((uptr)ptr, stack);
|
||||
}
|
||||
|
||||
void CommitBack(AsanThreadLocalMallocStorage *ms) {
|
||||
void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
|
||||
AllocatorCache *ac = GetAllocatorCache(ms);
|
||||
quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
|
||||
quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
|
||||
allocator.SwallowCache(ac);
|
||||
}
|
||||
|
||||
@ -714,6 +754,24 @@ struct Allocator {
|
||||
return AsanChunkView(m1);
|
||||
}
|
||||
|
||||
void Purge(BufferedStackTrace *stack) {
|
||||
AsanThread *t = GetCurrentThread();
|
||||
if (t) {
|
||||
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
|
||||
quarantine.DrainAndRecycle(GetQuarantineCache(ms),
|
||||
QuarantineCallback(GetAllocatorCache(ms),
|
||||
stack));
|
||||
}
|
||||
{
|
||||
SpinMutexLock l(&fallback_mutex);
|
||||
quarantine.DrainAndRecycle(&fallback_quarantine_cache,
|
||||
QuarantineCallback(&fallback_allocator_cache,
|
||||
stack));
|
||||
}
|
||||
|
||||
allocator.ForceReleaseToOS();
|
||||
}
|
||||
|
||||
void PrintStats() {
|
||||
allocator.PrintStats();
|
||||
quarantine.PrintStats();
|
||||
@ -748,6 +806,9 @@ bool AsanChunkView::IsQuarantined() const {
|
||||
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
|
||||
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
|
||||
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
|
||||
u32 AsanChunkView::UserRequestedAlignment() const {
|
||||
return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
|
||||
}
|
||||
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
|
||||
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
|
||||
AllocType AsanChunkView::GetAllocType() const {
|
||||
@ -773,7 +834,7 @@ StackTrace AsanChunkView::GetFreeStack() const {
|
||||
}
|
||||
|
||||
void InitializeAllocator(const AllocatorOptions &options) {
|
||||
instance.Initialize(options);
|
||||
instance.InitLinkerInitialized(options);
|
||||
}
|
||||
|
||||
void ReInitializeAllocator(const AllocatorOptions &options) {
|
||||
@ -792,7 +853,8 @@ AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
|
||||
}
|
||||
|
||||
void AsanThreadLocalMallocStorage::CommitBack() {
|
||||
instance.CommitBack(this);
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
instance.CommitBack(this, &stack);
|
||||
}
|
||||
|
||||
void PrintInternalAllocatorStats() {
|
||||
@ -800,12 +862,12 @@ void PrintInternalAllocatorStats() {
|
||||
}
|
||||
|
||||
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
|
||||
instance.Deallocate(ptr, 0, stack, alloc_type);
|
||||
instance.Deallocate(ptr, 0, 0, stack, alloc_type);
|
||||
}
|
||||
|
||||
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
|
||||
AllocType alloc_type) {
|
||||
instance.Deallocate(ptr, size, stack, alloc_type);
|
||||
void asan_delete(void *ptr, uptr size, uptr alignment,
|
||||
BufferedStackTrace *stack, AllocType alloc_type) {
|
||||
instance.Deallocate(ptr, size, alignment, stack, alloc_type);
|
||||
}
|
||||
|
||||
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
|
||||
@ -821,7 +883,7 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
|
||||
return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
|
||||
if (size == 0) {
|
||||
if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
|
||||
instance.Deallocate(p, 0, stack, FROM_MALLOC);
|
||||
instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
|
||||
return nullptr;
|
||||
}
|
||||
// Allocate a size of 1 if we shouldn't free() on Realloc to 0
|
||||
@ -839,7 +901,9 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
|
||||
uptr PageSize = GetPageSizeCached();
|
||||
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
|
||||
errno = errno_ENOMEM;
|
||||
return AsanAllocator::FailureHandler::OnBadRequest();
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportPvallocOverflow(size, stack);
|
||||
}
|
||||
// pvalloc(0) should allocate one page.
|
||||
size = size ? RoundUpTo(size, PageSize) : PageSize;
|
||||
@ -851,20 +915,35 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
|
||||
AllocType alloc_type) {
|
||||
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
|
||||
errno = errno_EINVAL;
|
||||
return AsanAllocator::FailureHandler::OnBadRequest();
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportInvalidAllocationAlignment(alignment, stack);
|
||||
}
|
||||
return SetErrnoOnNull(
|
||||
instance.Allocate(size, alignment, stack, alloc_type, true));
|
||||
}
|
||||
|
||||
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
|
||||
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
|
||||
errno = errno_EINVAL;
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportInvalidAlignedAllocAlignment(size, alignment, stack);
|
||||
}
|
||||
return SetErrnoOnNull(
|
||||
instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
|
||||
}
|
||||
|
||||
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
||||
BufferedStackTrace *stack) {
|
||||
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
|
||||
AsanAllocator::FailureHandler::OnBadRequest();
|
||||
return errno_EINVAL;
|
||||
if (AllocatorMayReturnNull())
|
||||
return errno_EINVAL;
|
||||
ReportInvalidPosixMemalignAlignment(alignment, stack);
|
||||
}
|
||||
void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
|
||||
if (UNLIKELY(!ptr))
|
||||
// OOM error is already taken care of by Allocate.
|
||||
return errno_ENOMEM;
|
||||
CHECK(IsAligned((uptr)ptr, alignment));
|
||||
*memptr = ptr;
|
||||
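The asan_aligned_alloc and asan_posix_memalign hunks above route invalid-alignment requests to the new Report* paths. A standalone sketch of the argument rules being enforced (the runtime's CheckAlignedAllocAlignmentAndSize and CheckPosixMemalignAlignment are not shown here, so the exact semantics below are an assumption based on the C and POSIX specifications):

#include <cstdint>
#include <cstdio>

static bool IsPowerOfTwo(uint64_t x) { return x != 0 && (x & (x - 1)) == 0; }

// aligned_alloc: alignment must be a power of two; C11 additionally requires
// size to be a multiple of alignment.
static bool AlignedAllocArgsOk(uint64_t alignment, uint64_t size) {
  return IsPowerOfTwo(alignment) && size % alignment == 0;
}

// posix_memalign: alignment must be a power of two and a multiple of
// sizeof(void*).
static bool PosixMemalignAlignmentOk(uint64_t alignment) {
  return IsPowerOfTwo(alignment) && alignment % sizeof(void *) == 0;
}

int main() {
  std::printf("%d %d\n", AlignedAllocArgsOk(64, 128), AlignedAllocArgsOk(48, 96));
  std::printf("%d %d\n", PosixMemalignAlignmentOk(16), PosixMemalignAlignmentOk(2));
  return 0;
}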
@ -1009,6 +1088,11 @@ uptr __sanitizer_get_allocated_size(const void *p) {
|
||||
return allocated_size;
|
||||
}
|
||||
|
||||
void __sanitizer_purge_allocator() {
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
instance.Purge(&stack);
|
||||
}
|
||||
|
||||
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
|
||||
// Provide default (no-op) implementation of malloc hooks.
|
||||
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
|
||||
|
@ -56,6 +56,7 @@ class AsanChunkView {
|
||||
uptr Beg() const; // First byte of user memory.
|
||||
uptr End() const; // Last byte of user memory.
|
||||
uptr UsedSize() const; // Size requested by the user.
|
||||
u32 UserRequestedAlignment() const; // Originally requested alignment.
|
||||
uptr AllocTid() const;
|
||||
uptr FreeTid() const;
|
||||
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
|
||||
@ -126,7 +127,8 @@ const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
# elif defined(__aarch64__) && SANITIZER_ANDROID
|
||||
const uptr kAllocatorSpace = 0x3000000000ULL;
|
||||
// Android needs to support 39, 42 and 48 bit VMA.
|
||||
const uptr kAllocatorSpace = ~(uptr)0;
|
||||
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
|
||||
typedef VeryCompactSizeClassMap SizeClassMap;
|
||||
# elif defined(__aarch64__)
|
||||
@ -195,8 +197,8 @@ struct AsanThreadLocalMallocStorage {
|
||||
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
|
||||
AllocType alloc_type);
|
||||
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
|
||||
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
|
||||
AllocType alloc_type);
|
||||
void asan_delete(void *ptr, uptr size, uptr alignment,
|
||||
BufferedStackTrace *stack, AllocType alloc_type);
|
||||
|
||||
void *asan_malloc(uptr size, BufferedStackTrace *stack);
|
||||
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
|
||||
@ -204,6 +206,7 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
|
||||
void *asan_valloc(uptr size, BufferedStackTrace *stack);
|
||||
void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
|
||||
|
||||
void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
|
||||
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
||||
BufferedStackTrace *stack);
|
||||
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
|
||||
|
@@ -25,7 +25,8 @@ using namespace __asan;
static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
                                char *name, uptr name_size,
                                uptr &region_address, uptr &region_size) {
- InternalMmapVector<StackVarDescr> vars(16);
+ InternalMmapVector<StackVarDescr> vars;
+ vars.reserve(16);
  if (!ParseFrameDescription(frame_descr, &vars)) {
    return;
  }
@ -18,23 +18,25 @@
|
||||
|
||||
namespace __asan {
|
||||
|
||||
// Return " (thread_name) " or an empty string if the name is empty.
|
||||
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
|
||||
uptr buff_len) {
|
||||
const char *name = t->name;
|
||||
if (name[0] == '\0') return "";
|
||||
buff[0] = 0;
|
||||
internal_strncat(buff, " (", 3);
|
||||
internal_strncat(buff, name, buff_len - 4);
|
||||
internal_strncat(buff, ")", 2);
|
||||
return buff;
|
||||
AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) {
|
||||
Init(t->tid, t->name);
|
||||
}
|
||||
|
||||
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
|
||||
if (tid == kInvalidTid) return "";
|
||||
asanThreadRegistry().CheckLocked();
|
||||
AsanThreadContext *t = GetThreadContextByTidLocked(tid);
|
||||
return ThreadNameWithParenthesis(t, buff, buff_len);
|
||||
AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) {
|
||||
if (tid == kInvalidTid) {
|
||||
Init(tid, "");
|
||||
} else {
|
||||
asanThreadRegistry().CheckLocked();
|
||||
AsanThreadContext *t = GetThreadContextByTidLocked(tid);
|
||||
Init(tid, t->name);
|
||||
}
|
||||
}
|
||||
|
||||
void AsanThreadIdAndName::Init(u32 tid, const char *tname) {
|
||||
int len = internal_snprintf(name, sizeof(name), "T%d", tid);
|
||||
CHECK(((unsigned int)len) < sizeof(name));
|
||||
if (tname[0] != '\0')
|
||||
internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname);
|
||||
}
|
||||
|
||||
void DescribeThread(AsanThreadContext *context) {
|
||||
@ -45,18 +47,15 @@ void DescribeThread(AsanThreadContext *context) {
|
||||
return;
|
||||
}
|
||||
context->announced = true;
|
||||
char tname[128];
|
||||
InternalScopedString str(1024);
|
||||
str.append("Thread T%d%s", context->tid,
|
||||
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
|
||||
str.append("Thread %s", AsanThreadIdAndName(context).c_str());
|
||||
if (context->parent_tid == kInvalidTid) {
|
||||
str.append(" created by unknown thread\n");
|
||||
Printf("%s", str.data());
|
||||
return;
|
||||
}
|
||||
str.append(
|
||||
" created by T%d%s here:\n", context->parent_tid,
|
||||
ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
|
||||
str.append(" created by %s here:\n",
|
||||
AsanThreadIdAndName(context->parent_tid).c_str());
|
||||
Printf("%s", str.data());
|
||||
StackDepotGet(context->stack_id).Print();
|
||||
// Recursively described parent thread if needed.
|
||||
@ -120,6 +119,7 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
|
||||
}
|
||||
descr->chunk_begin = chunk.Beg();
|
||||
descr->chunk_size = chunk.UsedSize();
|
||||
descr->user_requested_alignment = chunk.UserRequestedAlignment();
|
||||
descr->alloc_type = chunk.GetAllocType();
|
||||
}
|
||||
|
||||
@ -355,10 +355,9 @@ bool GlobalAddressDescription::PointsInsideTheSameVariable(
|
||||
|
||||
void StackAddressDescription::Print() const {
|
||||
Decorator d;
|
||||
char tname[128];
|
||||
Printf("%s", d.Location());
|
||||
Printf("Address %p is located in stack of thread T%d%s", addr, tid,
|
||||
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
|
||||
Printf("Address %p is located in stack of thread %s", addr,
|
||||
AsanThreadIdAndName(tid).c_str());
|
||||
|
||||
if (!frame_descr) {
|
||||
Printf("%s\n", d.Default());
|
||||
@ -377,7 +376,8 @@ void StackAddressDescription::Print() const {
|
||||
StackTrace alloca_stack(&frame_pc, 1);
|
||||
alloca_stack.Print();
|
||||
|
||||
InternalMmapVector<StackVarDescr> vars(16);
|
||||
InternalMmapVector<StackVarDescr> vars;
|
||||
vars.reserve(16);
|
||||
if (!ParseFrameDescription(frame_descr, &vars)) {
|
||||
Printf(
|
||||
"AddressSanitizer can't parse the stack frame "
|
||||
@ -399,7 +399,7 @@ void StackAddressDescription::Print() const {
|
||||
}
|
||||
Printf(
|
||||
"HINT: this may be a false positive if your program uses "
|
||||
"some custom stack unwind mechanism or swapcontext\n");
|
||||
"some custom stack unwind mechanism, swapcontext or vfork\n");
|
||||
if (SANITIZER_WINDOWS)
|
||||
Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
|
||||
else
|
||||
@ -415,26 +415,19 @@ void HeapAddressDescription::Print() const {
|
||||
AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
|
||||
StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
|
||||
|
||||
char tname[128];
|
||||
Decorator d;
|
||||
AsanThreadContext *free_thread = nullptr;
|
||||
if (free_tid != kInvalidTid) {
|
||||
free_thread = GetThreadContextByTidLocked(free_tid);
|
||||
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
|
||||
free_thread->tid,
|
||||
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
|
||||
d.Default());
|
||||
Printf("%sfreed by thread %s here:%s\n", d.Allocation(),
|
||||
AsanThreadIdAndName(free_thread).c_str(), d.Default());
|
||||
StackTrace free_stack = GetStackTraceFromId(free_stack_id);
|
||||
free_stack.Print();
|
||||
Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
|
||||
alloc_thread->tid,
|
||||
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
|
||||
d.Default());
|
||||
Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(),
|
||||
AsanThreadIdAndName(alloc_thread).c_str(), d.Default());
|
||||
} else {
|
||||
Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
|
||||
alloc_thread->tid,
|
||||
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
|
||||
d.Default());
|
||||
Printf("%sallocated by thread %s here:%s\n", d.Allocation(),
|
||||
AsanThreadIdAndName(alloc_thread).c_str(), d.Default());
|
||||
}
|
||||
alloc_stack.Print();
|
||||
DescribeThread(GetCurrentThread());
|
||||
|
@@ -24,9 +24,20 @@ void DescribeThread(AsanThreadContext *context);
static inline void DescribeThread(AsanThread *t) {
  if (t) DescribeThread(t->context());
}
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
-                                      uptr buff_len);
-const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
+
+class AsanThreadIdAndName {
+ public:
+  explicit AsanThreadIdAndName(AsanThreadContext *t);
+  explicit AsanThreadIdAndName(u32 tid);
+
+  // Contains "T%tid (%name)" or "T%tid" if the name is empty.
+  const char *c_str() const { return &name[0]; }
+
+ private:
+  void Init(u32 tid, const char *tname);
+
+  char name[128];
+};

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
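A standalone sketch of the formatting contract documented in the comment above, mirroring the Init() implementation added earlier in this diff; the class name below is illustrative:

#include <cstdio>

// c_str() yields "T<tid> (<name>)", or just "T<tid>" when the name is empty.
struct ThreadIdAndName {
  char name[128];

  ThreadIdAndName(unsigned tid, const char *tname) {
    int len = std::snprintf(name, sizeof(name), "T%u", tid);
    if (tname != nullptr && tname[0] != '\0')
      std::snprintf(name + len, sizeof(name) - len, " (%s)", tname);
  }
  const char *c_str() const { return &name[0]; }
};

int main() {
  std::printf("%s\n", ThreadIdAndName(7, "worker").c_str());  // T7 (worker)
  std::printf("%s\n", ThreadIdAndName(0, "").c_str());        // T0
  return 0;
}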
@@ -100,6 +111,7 @@ struct ChunkAccess {
  sptr offset;
  uptr chunk_begin;
  uptr chunk_size;
+ u32 user_requested_alignment : 12;
  u32 access_type : 2;
  u32 alloc_type : 2;
};
@ -11,7 +11,6 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "asan_errors.h"
|
||||
#include <signal.h>
|
||||
#include "asan_descriptions.h"
|
||||
#include "asan_mapping.h"
|
||||
#include "asan_report.h"
|
||||
@ -35,8 +34,7 @@ static void OnStackUnwind(const SignalContext &sig,
|
||||
// corresponding code in the sanitizer_common and we use this callback to
|
||||
// print it.
|
||||
static_cast<const ScarinessScoreBase *>(callback_context)->Print();
|
||||
GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
|
||||
sig.context, fast);
|
||||
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context, fast);
|
||||
}
|
||||
|
||||
void ErrorDeadlySignal::Print() {
|
||||
@ -45,13 +43,11 @@ void ErrorDeadlySignal::Print() {
|
||||
|
||||
void ErrorDoubleFree::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
char tname[128];
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: attempting %s on %p in "
|
||||
"thread T%d%s:\n",
|
||||
scariness.GetDescription(), addr_description.addr, tid,
|
||||
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
|
||||
"ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n",
|
||||
scariness.GetDescription(), addr_description.addr,
|
||||
AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s", d.Default());
|
||||
scariness.Print();
|
||||
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
|
||||
@ -61,20 +57,36 @@ void ErrorDoubleFree::Print() {
|
||||
ReportErrorSummary(scariness.GetDescription(), &stack);
|
||||
}
|
||||
|
||||
void ErrorNewDeleteSizeMismatch::Print() {
|
||||
void ErrorNewDeleteTypeMismatch::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
char tname[128];
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: %s on %p in thread "
|
||||
"T%d%s:\n",
|
||||
scariness.GetDescription(), addr_description.addr, tid,
|
||||
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
|
||||
"ERROR: AddressSanitizer: %s on %p in thread %s:\n",
|
||||
scariness.GetDescription(), addr_description.addr,
|
||||
AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s object passed to delete has wrong type:\n", d.Default());
|
||||
Printf(
|
||||
" size of the allocated type: %zd bytes;\n"
|
||||
" size of the deallocated type: %zd bytes.\n",
|
||||
addr_description.chunk_access.chunk_size, delete_size);
|
||||
if (delete_size != 0) {
|
||||
Printf(
|
||||
" size of the allocated type: %zd bytes;\n"
|
||||
" size of the deallocated type: %zd bytes.\n",
|
||||
addr_description.chunk_access.chunk_size, delete_size);
|
||||
}
|
||||
const uptr user_alignment =
|
||||
addr_description.chunk_access.user_requested_alignment;
|
||||
if (delete_alignment != user_alignment) {
|
||||
char user_alignment_str[32];
|
||||
char delete_alignment_str[32];
|
||||
internal_snprintf(user_alignment_str, sizeof(user_alignment_str),
|
||||
"%zd bytes", user_alignment);
|
||||
internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str),
|
||||
"%zd bytes", delete_alignment);
|
||||
static const char *kDefaultAlignment = "default-aligned";
|
||||
Printf(
|
||||
" alignment of the allocated type: %s;\n"
|
||||
" alignment of the deallocated type: %s.\n",
|
||||
user_alignment > 0 ? user_alignment_str : kDefaultAlignment,
|
||||
delete_alignment > 0 ? delete_alignment_str : kDefaultAlignment);
|
||||
}
|
||||
CHECK_GT(free_stack->size, 0);
|
||||
scariness.Print();
|
||||
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
|
||||
@ -88,13 +100,11 @@ void ErrorNewDeleteSizeMismatch::Print() {
|
||||
|
||||
void ErrorFreeNotMalloced::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
char tname[128];
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: attempting free on address "
|
||||
"which was not malloc()-ed: %p in thread T%d%s\n",
|
||||
addr_description.Address(), tid,
|
||||
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
|
||||
"which was not malloc()-ed: %p in thread %s\n",
|
||||
addr_description.Address(), AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s", d.Default());
|
||||
CHECK_GT(free_stack->size, 0);
|
||||
scariness.Print();
|
||||
@ -111,7 +121,7 @@ void ErrorAllocTypeMismatch::Print() {
|
||||
"operator delete []"};
|
||||
CHECK_NE(alloc_type, dealloc_type);
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
|
||||
scariness.GetDescription(),
|
||||
alloc_names[alloc_type], dealloc_names[dealloc_type],
|
||||
@ -130,7 +140,7 @@ void ErrorAllocTypeMismatch::Print() {
|
||||
|
||||
void ErrorMallocUsableSizeNotOwned::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
|
||||
"pointer which is not owned: %p\n",
|
||||
@ -143,7 +153,7 @@ void ErrorMallocUsableSizeNotOwned::Print() {
|
||||
|
||||
void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: attempting to call "
|
||||
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
|
||||
@ -154,11 +164,123 @@ void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorCallocOverflow::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: calloc parameters overflow: count * size "
|
||||
"(%zd * %zd) cannot be represented in type size_t (thread %s)\n",
|
||||
count, size, AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorPvallocOverflow::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: pvalloc parameters overflow: size 0x%zx "
|
||||
"rounded up to system page size 0x%zx cannot be represented in type "
|
||||
"size_t (thread %s)\n",
|
||||
size, GetPageSizeCached(), AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorInvalidAllocationAlignment::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: invalid allocation alignment: %zd, "
|
||||
"alignment must be a power of two (thread %s)\n",
|
||||
alignment, AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorInvalidAlignedAllocAlignment::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
#if SANITIZER_POSIX
|
||||
Report("ERROR: AddressSanitizer: invalid alignment requested in "
|
||||
"aligned_alloc: %zd, alignment must be a power of two and the "
|
||||
"requested size 0x%zx must be a multiple of alignment "
|
||||
"(thread %s)\n", alignment, size, AsanThreadIdAndName(tid).c_str());
|
||||
#else
|
||||
Report("ERROR: AddressSanitizer: invalid alignment requested in "
|
||||
"aligned_alloc: %zd, the requested size 0x%zx must be a multiple of "
|
||||
"alignment (thread %s)\n", alignment, size,
|
||||
AsanThreadIdAndName(tid).c_str());
|
||||
#endif
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorInvalidPosixMemalignAlignment::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: invalid alignment requested in posix_memalign: "
|
||||
"%zd, alignment must be a power of two and a multiple of sizeof(void*) "
|
||||
"== %zd (thread %s)\n",
|
||||
alignment, sizeof(void*), AsanThreadIdAndName(tid).c_str()); // NOLINT
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorAllocationSizeTooBig::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: requested allocation size 0x%zx (0x%zx after "
|
||||
"adjustments for alignment, red zones etc.) exceeds maximum supported "
|
||||
"size of 0x%zx (thread %s)\n",
|
||||
user_size, total_size, max_size, AsanThreadIdAndName(tid).c_str());
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorRssLimitExceeded::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: specified RSS limit exceeded, currently set to "
|
||||
"soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorOutOfMemory::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: allocator is out of memory trying to allocate "
|
||||
"0x%zx bytes\n", requested_size);
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(scariness.GetDescription(), stack);
|
||||
}
|
||||
|
||||
void ErrorStringFunctionMemoryRangesOverlap::Print() {
|
||||
Decorator d;
|
||||
char bug_type[100];
|
||||
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report(
|
||||
"ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
|
||||
"overlap\n",
|
||||
@ -175,7 +297,7 @@ void ErrorStringFunctionMemoryRangesOverlap::Print() {
|
||||
|
||||
void ErrorStringFunctionSizeOverflow::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
|
||||
scariness.GetDescription(), size);
|
||||
Printf("%s", d.Default());
|
||||
@ -203,7 +325,7 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
|
||||
|
||||
void ErrorODRViolation::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
|
||||
global1.beg);
|
||||
Printf("%s", d.Default());
|
||||
@ -232,7 +354,7 @@ void ErrorODRViolation::Print() {
|
||||
|
||||
void ErrorInvalidPointerPair::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
|
||||
addr1_description.Address(), addr2_description.Address());
|
||||
Printf("%s", d.Default());
|
||||
@ -396,6 +518,7 @@ static void PrintLegend(InternalScopedString *str) {
|
||||
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
|
||||
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
|
||||
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
|
||||
PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap);
|
||||
}
|
||||
|
||||
static void PrintShadowBytes(InternalScopedString *str, const char *before,
|
||||
@ -420,9 +543,14 @@ static void PrintShadowMemoryForAddress(uptr addr) {
|
||||
InternalScopedString str(4096 * 8);
|
||||
str.append("Shadow bytes around the buggy address:\n");
|
||||
for (int i = -5; i <= 5; i++) {
|
||||
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
|
||||
// Skip rows that would be outside the shadow range. This can happen when
|
||||
// the user address is near the bottom, top, or shadow gap of the address
|
||||
// space.
|
||||
if (!AddrIsInShadow(row_shadow_addr)) continue;
|
||||
const char *prefix = (i == 0) ? "=>" : " ";
|
||||
PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
|
||||
(u8 *)shadow_addr, n_bytes_per_row);
|
||||
PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr,
|
||||
n_bytes_per_row);
|
||||
}
|
||||
if (flags()->print_legend) PrintLegend(&str);
|
||||
Printf("%s", str.data());
|
||||
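The shadow-dump hunk above skips rows whose shadow address falls outside the shadow range. For orientation, a worked example of the conventional ASan shadow mapping that MEM_TO_SHADOW and AddrIsInShadow rely on (the exact SHADOW_OFFSET is platform-dependent; the value below is only a typical x86-64 choice and is an assumption, not part of this diff):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kShadowScale = 3;               // 8 application bytes per shadow byte
  const uint64_t kShadowOffset = 0x7fff8000ULL;  // typical x86-64 offset (assumption)
  uint64_t addr = 0x602000000010ULL;
  uint64_t shadow = (addr >> kShadowScale) + kShadowOffset;
  std::printf("shadow byte for 0x%llx is at 0x%llx\n",
              (unsigned long long)addr, (unsigned long long)shadow);
  return 0;
}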
@ -430,17 +558,15 @@ static void PrintShadowMemoryForAddress(uptr addr) {
|
||||
|
||||
void ErrorGeneric::Print() {
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Printf("%s", d.Error());
|
||||
uptr addr = addr_description.Address();
|
||||
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
|
||||
bug_descr, (void *)addr, pc, bp, sp);
|
||||
Printf("%s", d.Default());
|
||||
|
||||
char tname[128];
|
||||
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
|
||||
Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(),
|
||||
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
|
||||
(void *)addr, tid,
|
||||
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.Default());
|
||||
(void *)addr, AsanThreadIdAndName(tid).c_str(), d.Default());
|
||||
|
||||
scariness.Print();
|
||||
GET_STACK_TRACE_FATAL(pc, bp);
|
||||
|
@ -18,20 +18,30 @@
|
||||
|
||||
namespace __asan {
|
||||
|
||||
// (*) VS2013 does not implement unrestricted unions, so we need a trivial
|
||||
// default constructor explicitly defined for each particular error.
|
||||
|
||||
// None of the error classes own the stack traces mentioned in them.
|
||||
|
||||
struct ErrorBase {
|
||||
ErrorBase() = default;
|
||||
explicit ErrorBase(u32 tid_) : tid(tid_) {}
|
||||
ScarinessScoreBase scariness;
|
||||
u32 tid;
|
||||
|
||||
ErrorBase() = default; // (*)
|
||||
explicit ErrorBase(u32 tid_) : tid(tid_) {}
|
||||
ErrorBase(u32 tid_, int initial_score, const char *reason) : tid(tid_) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(initial_score, reason);
|
||||
}
|
||||
};
|
||||
|
||||
struct ErrorDeadlySignal : ErrorBase {
|
||||
SignalContext signal;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorDeadlySignal() = default;
|
||||
|
||||
ErrorDeadlySignal() = default; // (*)
|
||||
ErrorDeadlySignal(u32 tid, const SignalContext &sig)
|
||||
: ErrorBase(tid), signal(sig) {
|
||||
: ErrorBase(tid),
|
||||
signal(sig) {
|
||||
scariness.Clear();
|
||||
if (signal.IsStackOverflow()) {
|
||||
scariness.Scare(10, "stack-overflow");
|
||||
@ -53,123 +63,206 @@ struct ErrorDeadlySignal : ErrorBase {
|
||||
};
|
||||
|
||||
struct ErrorDoubleFree : ErrorBase {
|
||||
// ErrorDoubleFree doesn't own the stack trace.
|
||||
const BufferedStackTrace *second_free_stack;
|
||||
HeapAddressDescription addr_description;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorDoubleFree() = default;
|
||||
|
||||
ErrorDoubleFree() = default; // (*)
|
||||
ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
|
||||
: ErrorBase(tid), second_free_stack(stack) {
|
||||
: ErrorBase(tid, 42, "double-free"),
|
||||
second_free_stack(stack) {
|
||||
CHECK_GT(second_free_stack->size, 0);
|
||||
GetHeapAddressInformation(addr, 1, &addr_description);
|
||||
scariness.Clear();
|
||||
scariness.Scare(42, "double-free");
|
||||
}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorNewDeleteSizeMismatch : ErrorBase {
|
||||
// ErrorNewDeleteSizeMismatch doesn't own the stack trace.
|
||||
struct ErrorNewDeleteTypeMismatch : ErrorBase {
|
||||
const BufferedStackTrace *free_stack;
|
||||
HeapAddressDescription addr_description;
|
||||
uptr delete_size;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorNewDeleteSizeMismatch() = default;
|
||||
ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
|
||||
uptr delete_size_)
|
||||
: ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
|
||||
uptr delete_alignment;
|
||||
|
||||
ErrorNewDeleteTypeMismatch() = default; // (*)
|
||||
ErrorNewDeleteTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
|
||||
uptr delete_size_, uptr delete_alignment_)
|
||||
: ErrorBase(tid, 10, "new-delete-type-mismatch"),
|
||||
free_stack(stack),
|
||||
delete_size(delete_size_),
|
||||
delete_alignment(delete_alignment_) {
|
||||
GetHeapAddressInformation(addr, 1, &addr_description);
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "new-delete-type-mismatch");
|
||||
}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorFreeNotMalloced : ErrorBase {
|
||||
// ErrorFreeNotMalloced doesn't own the stack trace.
|
||||
const BufferedStackTrace *free_stack;
|
||||
AddressDescription addr_description;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorFreeNotMalloced() = default;
|
||||
|
||||
ErrorFreeNotMalloced() = default; // (*)
|
||||
ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 40, "bad-free"),
|
||||
free_stack(stack),
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(40, "bad-free");
|
||||
}
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorAllocTypeMismatch : ErrorBase {
|
||||
// ErrorAllocTypeMismatch doesn't own the stack trace.
|
||||
const BufferedStackTrace *dealloc_stack;
|
||||
HeapAddressDescription addr_description;
|
||||
AllocType alloc_type, dealloc_type;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorAllocTypeMismatch() = default;
|
||||
|
||||
ErrorAllocTypeMismatch() = default; // (*)
|
||||
ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
|
||||
AllocType alloc_type_, AllocType dealloc_type_)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "alloc-dealloc-mismatch"),
|
||||
dealloc_stack(stack),
|
||||
alloc_type(alloc_type_),
|
||||
dealloc_type(dealloc_type_) {
|
||||
GetHeapAddressInformation(addr, 1, &addr_description);
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "alloc-dealloc-mismatch");
|
||||
};
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorMallocUsableSizeNotOwned : ErrorBase {
|
||||
// ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
|
||||
const BufferedStackTrace *stack;
|
||||
AddressDescription addr_description;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorMallocUsableSizeNotOwned() = default;
|
||||
|
||||
ErrorMallocUsableSizeNotOwned() = default; // (*)
|
||||
ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "bad-malloc_usable_size"),
|
||||
stack(stack_),
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "bad-malloc_usable_size");
|
||||
}
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
|
||||
// ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
|
||||
const BufferedStackTrace *stack;
|
||||
AddressDescription addr_description;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorSanitizerGetAllocatedSizeNotOwned() = default;
|
||||
|
||||
ErrorSanitizerGetAllocatedSizeNotOwned() = default; // (*)
|
||||
ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr addr)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "bad-__sanitizer_get_allocated_size"),
|
||||
stack(stack_),
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
|
||||
}
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorCallocOverflow : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr count;
|
||||
uptr size;
|
||||
|
||||
ErrorCallocOverflow() = default; // (*)
|
||||
ErrorCallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr count_,
|
||||
uptr size_)
|
||||
: ErrorBase(tid, 10, "calloc-overflow"),
|
||||
stack(stack_),
|
||||
count(count_),
|
||||
size(size_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorPvallocOverflow : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr size;
|
||||
|
||||
ErrorPvallocOverflow() = default; // (*)
|
||||
ErrorPvallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr size_)
|
||||
: ErrorBase(tid, 10, "pvalloc-overflow"),
|
||||
stack(stack_),
|
||||
size(size_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorInvalidAllocationAlignment : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr alignment;
|
||||
|
||||
ErrorInvalidAllocationAlignment() = default; // (*)
|
||||
ErrorInvalidAllocationAlignment(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr alignment_)
|
||||
: ErrorBase(tid, 10, "invalid-allocation-alignment"),
|
||||
stack(stack_),
|
||||
alignment(alignment_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorInvalidAlignedAllocAlignment : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr size;
|
||||
uptr alignment;
|
||||
|
||||
ErrorInvalidAlignedAllocAlignment() = default; // (*)
|
||||
ErrorInvalidAlignedAllocAlignment(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr size_, uptr alignment_)
|
||||
: ErrorBase(tid, 10, "invalid-aligned-alloc-alignment"),
|
||||
stack(stack_),
|
||||
size(size_),
|
||||
alignment(alignment_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorInvalidPosixMemalignAlignment : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr alignment;
|
||||
|
||||
ErrorInvalidPosixMemalignAlignment() = default; // (*)
|
||||
ErrorInvalidPosixMemalignAlignment(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr alignment_)
|
||||
: ErrorBase(tid, 10, "invalid-posix-memalign-alignment"),
|
||||
stack(stack_),
|
||||
alignment(alignment_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorAllocationSizeTooBig : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr user_size;
|
||||
uptr total_size;
|
||||
uptr max_size;
|
||||
|
||||
ErrorAllocationSizeTooBig() = default; // (*)
|
||||
ErrorAllocationSizeTooBig(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr user_size_, uptr total_size_, uptr max_size_)
|
||||
: ErrorBase(tid, 10, "allocation-size-too-big"),
|
||||
stack(stack_),
|
||||
user_size(user_size_),
|
||||
total_size(total_size_),
|
||||
max_size(max_size_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorRssLimitExceeded : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
|
||||
ErrorRssLimitExceeded() = default; // (*)
|
||||
ErrorRssLimitExceeded(u32 tid, BufferedStackTrace *stack_)
|
||||
: ErrorBase(tid, 10, "rss-limit-exceeded"),
|
||||
stack(stack_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorOutOfMemory : ErrorBase {
|
||||
const BufferedStackTrace *stack;
|
||||
uptr requested_size;
|
||||
|
||||
ErrorOutOfMemory() = default; // (*)
|
||||
ErrorOutOfMemory(u32 tid, BufferedStackTrace *stack_, uptr requested_size_)
|
||||
: ErrorBase(tid, 10, "out-of-memory"),
|
||||
stack(stack_),
|
||||
requested_size(requested_size_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
|
||||
// ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
|
||||
const BufferedStackTrace *stack;
|
||||
uptr length1, length2;
|
||||
AddressDescription addr1_description;
|
||||
AddressDescription addr2_description;
|
||||
const char *function;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorStringFunctionMemoryRangesOverlap() = default;
|
||||
|
||||
ErrorStringFunctionMemoryRangesOverlap() = default; // (*)
|
||||
ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr addr1, uptr length1_, uptr addr2,
|
||||
uptr length2_, const char *function_)
|
||||
@ -189,65 +282,51 @@ struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
|
||||
};
|
||||
|
||||
struct ErrorStringFunctionSizeOverflow : ErrorBase {
|
||||
// ErrorStringFunctionSizeOverflow doesn't own the stack trace.
|
||||
const BufferedStackTrace *stack;
|
||||
AddressDescription addr_description;
|
||||
uptr size;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorStringFunctionSizeOverflow() = default;
|
||||
|
||||
ErrorStringFunctionSizeOverflow() = default; // (*)
|
||||
ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
|
||||
uptr addr, uptr size_)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "negative-size-param"),
|
||||
stack(stack_),
|
||||
addr_description(addr, /*shouldLockThreadRegistry=*/false),
|
||||
size(size_) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "negative-size-param");
|
||||
}
|
||||
size(size_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
|
||||
// ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
|
||||
const BufferedStackTrace *stack;
|
||||
uptr beg, end, old_mid, new_mid;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorBadParamsToAnnotateContiguousContainer() = default;
|
||||
|
||||
ErrorBadParamsToAnnotateContiguousContainer() = default; // (*)
|
||||
// PS4: Do we want an AddressDescription for beg?
|
||||
ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
|
||||
BufferedStackTrace *stack_,
|
||||
uptr beg_, uptr end_,
|
||||
uptr old_mid_, uptr new_mid_)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "bad-__sanitizer_annotate_contiguous_container"),
|
||||
stack(stack_),
|
||||
beg(beg_),
|
||||
end(end_),
|
||||
old_mid(old_mid_),
|
||||
new_mid(new_mid_) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
|
||||
}
|
||||
new_mid(new_mid_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
struct ErrorODRViolation : ErrorBase {
|
||||
__asan_global global1, global2;
|
||||
u32 stack_id1, stack_id2;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorODRViolation() = default;
|
||||
|
||||
ErrorODRViolation() = default; // (*)
|
||||
ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
|
||||
const __asan_global *g2, u32 stack_id2_)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "odr-violation"),
|
||||
global1(*g1),
|
||||
global2(*g2),
|
||||
stack_id1(stack_id1_),
|
||||
stack_id2(stack_id2_) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "odr-violation");
|
||||
}
|
||||
stack_id2(stack_id2_) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
@ -255,20 +334,16 @@ struct ErrorInvalidPointerPair : ErrorBase {
|
||||
uptr pc, bp, sp;
|
||||
AddressDescription addr1_description;
|
||||
AddressDescription addr2_description;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorInvalidPointerPair() = default;
|
||||
|
||||
ErrorInvalidPointerPair() = default; // (*)
|
||||
ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
|
||||
uptr p2)
|
||||
: ErrorBase(tid),
|
||||
: ErrorBase(tid, 10, "invalid-pointer-pair"),
|
||||
pc(pc_),
|
||||
bp(bp_),
|
||||
sp(sp_),
|
||||
addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
|
||||
addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
|
||||
scariness.Clear();
|
||||
scariness.Scare(10, "invalid-pointer-pair");
|
||||
}
|
||||
addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {}
|
||||
void Print();
|
||||
};
|
||||
|
||||
@ -279,9 +354,8 @@ struct ErrorGeneric : ErrorBase {
|
||||
const char *bug_descr;
|
||||
bool is_write;
|
||||
u8 shadow_val;
|
||||
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
|
||||
// constructor
|
||||
ErrorGeneric() = default;
|
||||
|
||||
ErrorGeneric() = default; // (*)
|
||||
ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
|
||||
uptr access_size_);
|
||||
void Print();
|
||||
@ -291,11 +365,19 @@ struct ErrorGeneric : ErrorBase {
|
||||
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
|
||||
macro(DeadlySignal) \
|
||||
macro(DoubleFree) \
|
||||
macro(NewDeleteSizeMismatch) \
|
||||
macro(NewDeleteTypeMismatch) \
|
||||
macro(FreeNotMalloced) \
|
||||
macro(AllocTypeMismatch) \
|
||||
macro(MallocUsableSizeNotOwned) \
|
||||
macro(SanitizerGetAllocatedSizeNotOwned) \
|
||||
macro(CallocOverflow) \
|
||||
macro(PvallocOverflow) \
|
||||
macro(InvalidAllocationAlignment) \
|
||||
macro(InvalidAlignedAllocAlignment) \
|
||||
macro(InvalidPosixMemalignAlignment) \
|
||||
macro(AllocationSizeTooBig) \
|
||||
macro(RssLimitExceeded) \
|
||||
macro(OutOfMemory) \
|
||||
macro(StringFunctionMemoryRangesOverlap) \
|
||||
macro(StringFunctionSizeOverflow) \
|
||||
macro(BadParamsToAnnotateContiguousContainer) \
|
||||
@ -330,6 +412,7 @@ struct ErrorDescription {
|
||||
};
|
||||
|
||||
ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
|
||||
explicit ErrorDescription(LinkerInitialized) {}
|
||||
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
|
||||
|
||||
bool IsValid() { return kind != kErrorKindInvalid; }
|
||||
|
@@ -26,9 +26,9 @@ static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
- CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
- if (class_id <= 6) {
+ if (SHADOW_SCALE == 3 && class_id <= 6) {
+   // This code expects SHADOW_SCALE=3.
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
@ -31,10 +31,7 @@ static const char *MaybeCallAsanDefaultOptions() {
|
||||
|
||||
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
|
||||
#ifdef ASAN_DEFAULT_OPTIONS
|
||||
// Stringize the macro value.
|
||||
# define ASAN_STRINGIZE(x) #x
|
||||
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
|
||||
return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
|
||||
return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
|
||||
#else
|
||||
return "";
|
||||
#endif
|
||||
@ -146,6 +143,9 @@ void InitializeFlags() {
            SanitizerToolName);
    Die();
  }
  // Ensure that redzone is at least SHADOW_GRANULARITY.
  if (f->redzone < (int)SHADOW_GRANULARITY)
    f->redzone = SHADOW_GRANULARITY;
  // Make "strict_init_order" imply "check_initialization_order".
  // TODO(samsonov): Use a single runtime flag for an init-order checker.
  if (f->strict_init_order) {
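Worked example of the clamp above, under the configurations in this merge: with the default SHADOW_SCALE of 3, SHADOW_GRANULARITY is 1 << 3 = 8, so the usual redzone of 16 is left alone; under the Myriad configuration introduced later in this diff (SHADOW_SCALE == 5) the granularity is 32, and a 16-byte redzone is raised to 32 so that redzones always span whole shadow granules.
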
@ -158,6 +158,10 @@ void InitializeFlags() {
|
||||
CHECK_LE(f->max_redzone, 2048);
|
||||
CHECK(IsPowerOfTwo(f->redzone));
|
||||
CHECK(IsPowerOfTwo(f->max_redzone));
|
||||
if (SANITIZER_RTEMS) {
|
||||
CHECK(!f->unmap_shadow_on_exit);
|
||||
CHECK(!f->protect_shadow_gap);
|
||||
}
|
||||
|
||||
// quarantine_size is deprecated but we still honor it.
|
||||
// quarantine_size can not be used together with quarantine_size_mb.
|
||||
|
@ -86,7 +86,8 @@ ASAN_FLAG(bool, check_malloc_usable_size, true,
|
||||
"295.*.")
|
||||
ASAN_FLAG(bool, unmap_shadow_on_exit, false,
|
||||
"If set, explicitly unmaps the (huge) shadow at exit.")
|
||||
ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
|
||||
ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS,
|
||||
"If set, mprotect the shadow gap")
|
||||
ASAN_FLAG(bool, print_stats, false,
|
||||
"Print various statistics after printing an error message or if "
|
||||
"atexit=1.")
|
||||
|
@ -26,7 +26,7 @@
|
||||
namespace __asan {
|
||||
|
||||
// The system already set up the shadow memory for us.
|
||||
// __sanitizer::GetMaxVirtualAddress has already been called by
|
||||
// __sanitizer::GetMaxUserVirtualAddress has already been called by
|
||||
// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc).
|
||||
// Just do some additional sanity checks here.
|
||||
void InitializeShadowMemory() {
|
||||
|
@ -147,6 +147,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
  }
}

// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g) {
  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
    // This check may not be enough: if the first global is much larger
    // the entire redzone of the second global may be within the first global.
    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
      if (g->beg == l->g->beg &&
          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
          !IsODRViolationSuppressed(g->name))
        ReportODRViolation(g, FindRegistrationSite(g),
                           l->g, FindRegistrationSite(l->g));
    }
  }
}

// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
@ -194,6 +211,8 @@ static void RegisterGlobal(const Global *g) {
|
||||
// where two globals with the same name are defined in different modules.
|
||||
if (UseODRIndicator(g))
|
||||
CheckODRViolationViaIndicator(g);
|
||||
else
|
||||
CheckODRViolationViaPoisoning(g);
|
||||
}
|
||||
if (CanPoisonMemory())
|
||||
PoisonRedZones(*g);
|
||||
@ -203,8 +222,9 @@ static void RegisterGlobal(const Global *g) {
|
||||
list_of_all_globals = l;
|
||||
if (g->has_dynamic_init) {
|
||||
if (!dynamic_init_globals) {
|
||||
dynamic_init_globals = new(allocator_for_globals)
|
||||
VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
|
||||
dynamic_init_globals =
|
||||
new (allocator_for_globals) VectorOfGlobals; // NOLINT
|
||||
dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
|
||||
}
|
||||
DynInitGlobal dyn_global = { *g, false };
|
||||
dynamic_init_globals->push_back(dyn_global);
|
||||
@ -337,9 +357,11 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
u32 stack_id = StackDepotPut(stack);
|
||||
BlockingMutexLock lock(&mu_for_globals);
|
||||
if (!global_registration_site_vector)
|
||||
if (!global_registration_site_vector) {
|
||||
global_registration_site_vector =
|
||||
new(allocator_for_globals) GlobalRegistrationSiteVector(128);
|
||||
new (allocator_for_globals) GlobalRegistrationSiteVector; // NOLINT
|
||||
global_registration_site_vector->reserve(128);
|
||||
}
|
||||
GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
|
||||
global_registration_site_vector->push_back(site);
|
||||
if (flags()->report_globals >= 2) {
|
||||
|
@ -17,9 +17,9 @@ namespace __asan {
|
||||
#pragma section(".ASAN$GA", read, write) // NOLINT
|
||||
#pragma section(".ASAN$GZ", read, write) // NOLINT
|
||||
extern "C" __declspec(allocate(".ASAN$GA"))
|
||||
__asan_global __asan_globals_start = {};
|
||||
ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {};
|
||||
extern "C" __declspec(allocate(".ASAN$GZ"))
|
||||
__asan_global __asan_globals_end = {};
|
||||
ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {};
|
||||
#pragma comment(linker, "/merge:.ASAN=.data")
|
||||
|
||||
static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
|
||||
@ -27,7 +27,7 @@ static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
|
||||
__asan_global *end = &__asan_globals_end;
|
||||
uptr bytediff = (uptr)end - (uptr)start;
|
||||
if (bytediff % sizeof(__asan_global) != 0) {
|
||||
#ifdef SANITIZER_DLL_THUNK
|
||||
#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
|
||||
__debugbreak();
|
||||
#else
|
||||
CHECK("corrupt asan global array");
|
||||
|
@ -13,6 +13,8 @@
|
||||
#ifndef ASAN_INIT_VERSION_H
|
||||
#define ASAN_INIT_VERSION_H
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
|
||||
extern "C" {
|
||||
// Every time the ASan ABI changes we also change the version number in the
|
||||
// __asan_init function name. Objects built with incompatible ASan ABI
|
||||
@ -30,7 +32,12 @@ extern "C" {
|
||||
// v6=>v7: added 'odr_indicator' to __asan_global
|
||||
// v7=>v8: added '__asan_(un)register_image_globals' functions for dead
|
||||
// stripping support on Mach-O platforms
|
||||
#if SANITIZER_WORDSIZE == 32 && SANITIZER_ANDROID
|
||||
// v8=>v9: 32-bit Android switched to dynamic shadow
|
||||
#define __asan_version_mismatch_check __asan_version_mismatch_check_v9
|
||||
#else
|
||||
#define __asan_version_mismatch_check __asan_version_mismatch_check_v8
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif // ASAN_INIT_VERSION_H
|
||||
|
@ -22,15 +22,20 @@
|
||||
#include "lsan/lsan_common.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
|
||||
// There is no general interception at all on Fuchsia.
|
||||
// There is no general interception at all on Fuchsia and RTEMS.
|
||||
// Only the functions in asan_interceptors_memintrinsics.cc are
|
||||
// really defined to replace libc functions.
|
||||
#if !SANITIZER_FUCHSIA
|
||||
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
|
||||
#if SANITIZER_POSIX
|
||||
#include "sanitizer_common/sanitizer_posix.h"
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \
|
||||
ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
|
||||
#include <unwind.h>
|
||||
#endif
|
||||
|
||||
#if defined(__i386) && SANITIZER_LINUX
|
||||
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
|
||||
#elif defined(__mips__) && SANITIZER_LINUX
|
||||
@ -176,6 +181,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
|
||||
(void)(s); \
|
||||
} while (false)
|
||||
#include "sanitizer_common/sanitizer_common_syscalls.inc"
|
||||
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
|
||||
|
||||
struct ThreadStartParam {
|
||||
atomic_uintptr_t t;
|
||||
@ -324,6 +330,32 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
|
||||
}
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
|
||||
INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) {
|
||||
CHECK(REAL(__cxa_rethrow_primary_exception));
|
||||
__asan_handle_no_return();
|
||||
REAL(__cxa_rethrow_primary_exception)(a);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
|
||||
INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException,
|
||||
_Unwind_Exception *object) {
|
||||
CHECK(REAL(_Unwind_RaiseException));
|
||||
__asan_handle_no_return();
|
||||
return REAL(_Unwind_RaiseException)(object);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
|
||||
INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
|
||||
_Unwind_Exception *object) {
|
||||
CHECK(REAL(_Unwind_SjLj_RaiseException));
|
||||
__asan_handle_no_return();
|
||||
return REAL(_Unwind_SjLj_RaiseException)(object);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT_INDEX
|
||||
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
|
||||
INTERCEPTOR(char*, index, const char *string, int c)
|
||||
@ -546,14 +578,6 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
|
||||
}
|
||||
#endif // ASAN_INTERCEPT___CXA_ATEXIT
|
||||
|
||||
#if ASAN_INTERCEPT_FORK
|
||||
INTERCEPTOR(int, fork, void) {
|
||||
ENSURE_ASAN_INITED();
|
||||
int pid = REAL(fork)();
|
||||
return pid;
|
||||
}
|
||||
#endif // ASAN_INTERCEPT_FORK
|
||||
|
||||
// ---------------------- InitializeAsanInterceptors ---------------- {{{1
|
||||
namespace __asan {
|
||||
void InitializeAsanInterceptors() {
|
||||
@ -604,6 +628,17 @@ void InitializeAsanInterceptors() {
|
||||
#if ASAN_INTERCEPT___CXA_THROW
|
||||
ASAN_INTERCEPT_FUNC(__cxa_throw);
|
||||
#endif
|
||||
#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
|
||||
ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception);
|
||||
#endif
|
||||
// Indirectly intercept std::rethrow_exception.
|
||||
#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
|
||||
INTERCEPT_FUNCTION(_Unwind_RaiseException);
|
||||
#endif
|
||||
// Indirectly intercept std::rethrow_exception.
|
||||
#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
|
||||
INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
|
||||
#endif
|
||||
|
||||
// Intercept threading-related functions
|
||||
#if ASAN_INTERCEPT_PTHREAD_CREATE
|
||||
@ -620,10 +655,6 @@ void InitializeAsanInterceptors() {
|
||||
ASAN_INTERCEPT_FUNC(__cxa_atexit);
|
||||
#endif
|
||||
|
||||
#if ASAN_INTERCEPT_FORK
|
||||
ASAN_INTERCEPT_FUNC(fork);
|
||||
#endif
|
||||
|
||||
InitializePlatformInterceptors();
|
||||
|
||||
VReport(1, "AddressSanitizer: libc interceptors initialized\n");
|
||||
|
@ -32,10 +32,10 @@ void InitializePlatformInterceptors();
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
// There is no general interception at all on Fuchsia.
|
||||
// There is no general interception at all on Fuchsia and RTEMS.
|
||||
// Only the functions in asan_interceptors_memintrinsics.h are
|
||||
// really defined to replace libc functions.
|
||||
#if !SANITIZER_FUCHSIA
|
||||
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
|
||||
// Use macro to describe if specific function should be
|
||||
// intercepted on a given platform.
|
||||
@ -44,22 +44,21 @@ void InitializePlatformInterceptors();
|
||||
# define ASAN_INTERCEPT__LONGJMP 1
|
||||
# define ASAN_INTERCEPT_INDEX 1
|
||||
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
|
||||
# define ASAN_INTERCEPT_FORK 1
|
||||
#else
|
||||
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
|
||||
# define ASAN_INTERCEPT__LONGJMP 0
|
||||
# define ASAN_INTERCEPT_INDEX 0
|
||||
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
|
||||
# define ASAN_INTERCEPT_FORK 0
|
||||
#endif
|
||||
|
||||
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
|
||||
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
|
||||
SANITIZER_SOLARIS
|
||||
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
|
||||
#else
|
||||
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
|
||||
#endif
|
||||
|
||||
#if SANITIZER_LINUX && !SANITIZER_ANDROID
|
||||
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS
|
||||
# define ASAN_INTERCEPT_SWAPCONTEXT 1
|
||||
#else
|
||||
# define ASAN_INTERCEPT_SWAPCONTEXT 0
|
||||
@ -77,12 +76,20 @@ void InitializePlatformInterceptors();
|
||||
# define ASAN_INTERCEPT___LONGJMP_CHK 0
|
||||
#endif
|
||||
|
||||
// Android bug: https://code.google.com/p/android/issues/detail?id=61799
|
||||
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
|
||||
!(SANITIZER_ANDROID && defined(__i386))
|
||||
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
|
||||
!SANITIZER_NETBSD
|
||||
# define ASAN_INTERCEPT___CXA_THROW 1
|
||||
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
|
||||
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
|
||||
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
|
||||
# else
|
||||
# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 1
|
||||
# endif
|
||||
#else
|
||||
# define ASAN_INTERCEPT___CXA_THROW 0
|
||||
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
|
||||
# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 0
|
||||
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 0
|
||||
#endif
|
||||
|
||||
#if !SANITIZER_WINDOWS
|
||||
@ -103,9 +110,6 @@ DECLARE_REAL(SIZE_T, strlen, const char *s)
|
||||
DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
|
||||
DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
|
||||
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
|
||||
struct sigaction;
|
||||
DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
|
||||
struct sigaction *oldact)
|
||||
|
||||
#if !SANITIZER_MAC
|
||||
#define ASAN_INTERCEPT_FUNC(name) \
|
||||
|
@ -29,14 +29,14 @@ void *__asan_memmove(void *to, const void *from, uptr size) {
|
||||
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
|
||||
}
|
||||
|
||||
#if SANITIZER_FUCHSIA
|
||||
#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
|
||||
|
||||
// Fuchsia doesn't use sanitizer_common_interceptors.inc, but the only
|
||||
// things there it wants are these three. Just define them as aliases
|
||||
// here rather than repeating the contents.
|
||||
// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but
|
||||
// the only things there it wants are these three. Just define them
|
||||
// as aliases here rather than repeating the contents.
|
||||
|
||||
decltype(memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
|
||||
decltype(memmove) memmove[[gnu::alias("__asan_memmove")]];
|
||||
decltype(memset) memset[[gnu::alias("__asan_memset")]];
|
||||
extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
|
||||
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
|
||||
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
|
||||
|
||||
#endif // SANITIZER_FUCHSIA
|
||||
#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS
|
||||
|
@ -131,15 +131,22 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
                                 const char *offset2, uptr length2) {
  return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
  const char *offset1 = (const char*)_offset1; \
  const char *offset2 = (const char*)_offset2; \
  if (RangesOverlap(offset1, length1, offset2, length2)) { \
    GET_STACK_TRACE_FATAL_HERE; \
    ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
                                            offset2, length2, &stack); \
  } \
} while (0)
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) \
  do { \
    const char *offset1 = (const char *)_offset1; \
    const char *offset2 = (const char *)_offset2; \
    if (RangesOverlap(offset1, length1, offset2, length2)) { \
      GET_STACK_TRACE_FATAL_HERE; \
      bool suppressed = IsInterceptorSuppressed(name); \
      if (!suppressed && HaveStackTraceBasedSuppressions()) { \
        suppressed = IsStackTraceSuppressed(&stack); \
      } \
      if (!suppressed) { \
        ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
                                                offset2, length2, &stack); \
      } \
    } \
  } while (0)

} // namespace __asan

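The predicate above is plain interval intersection; a minimal standalone restatement with one concrete case (hypothetical, not taken from the runtime):

  // Two ranges overlap unless one ends at or before the start of the other.
  static bool RangesOverlapSketch(const char *p1, unsigned long n1,
                                  const char *p2, unsigned long n2) {
    return !((p1 + n1 <= p2) || (p2 + n2 <= p1));
  }
  // Example: memcpy(buf + 4, buf, 8) checks [buf+4, buf+12) against
  // [buf, buf+8); the intervals intersect, so CHECK_RANGES_OVERLAP reports
  // unless an interceptor or stack-trace suppression matches.
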
@ -34,7 +34,7 @@
|
||||
// If set, values like allocator chunk size, as well as defaults for some flags
|
||||
// will be changed towards less memory overhead.
|
||||
#ifndef ASAN_LOW_MEMORY
|
||||
# if SANITIZER_IOS || SANITIZER_ANDROID
|
||||
# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS
|
||||
# define ASAN_LOW_MEMORY 1
|
||||
# else
|
||||
# define ASAN_LOW_MEMORY 0
|
||||
@ -76,7 +76,7 @@ void InitializeShadowMemory();
|
||||
// asan_malloc_linux.cc / asan_malloc_mac.cc
|
||||
void ReplaceSystemMalloc();
|
||||
|
||||
// asan_linux.cc / asan_mac.cc / asan_win.cc
|
||||
// asan_linux.cc / asan_mac.cc / asan_rtems.cc / asan_win.cc
|
||||
uptr FindDynamicShadowStart();
|
||||
void *AsanDoesNotSupportStaticLinkage();
|
||||
void AsanCheckDynamicRTPrereqs();
|
||||
@ -145,6 +145,9 @@ const int kAsanArrayCookieMagic = 0xac;
|
||||
const int kAsanIntraObjectRedzone = 0xbb;
|
||||
const int kAsanAllocaLeftMagic = 0xca;
|
||||
const int kAsanAllocaRightMagic = 0xcb;
|
||||
// Used to populate the shadow gap for systems without memory
|
||||
// protection there (i.e. Myriad).
|
||||
const int kAsanShadowGap = 0xcc;
|
||||
|
||||
static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
|
||||
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
|
||||
|
@ -11,10 +11,12 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
|
||||
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
|
||||
SANITIZER_SOLARIS
|
||||
|
||||
#include "asan_interceptors.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_premap_shadow.h"
|
||||
#include "asan_thread.h"
|
||||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
#include "sanitizer_common/sanitizer_freebsd.h"
|
||||
@ -28,6 +30,7 @@
|
||||
#include <sys/types.h>
|
||||
#include <dlfcn.h>
|
||||
#include <fcntl.h>
|
||||
#include <limits.h>
|
||||
#include <pthread.h>
|
||||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
@ -37,7 +40,11 @@
|
||||
#include <sys/link_elf.h>
|
||||
#endif
|
||||
|
||||
#if SANITIZER_ANDROID || SANITIZER_FREEBSD
|
||||
#if SANITIZER_SOLARIS
|
||||
#include <link.h>
|
||||
#endif
|
||||
|
||||
#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
|
||||
#include <ucontext.h>
|
||||
extern "C" void* _DYNAMIC;
|
||||
#elif SANITIZER_NETBSD
|
||||
@ -79,9 +86,51 @@ void *AsanDoesNotSupportStaticLinkage() {
|
||||
return &_DYNAMIC; // defined in link.h
|
||||
}
|
||||
|
||||
static void UnmapFromTo(uptr from, uptr to) {
|
||||
CHECK(to >= from);
|
||||
if (to == from) return;
|
||||
uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
|
||||
if (UNLIKELY(internal_iserror(res))) {
|
||||
Report(
|
||||
"ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
|
||||
"%p\n",
|
||||
to - from, to - from, from);
|
||||
CHECK("unable to unmap" && 0);
|
||||
}
|
||||
}
|
||||
|
||||
#if ASAN_PREMAP_SHADOW
|
||||
uptr FindPremappedShadowStart() {
|
||||
uptr granularity = GetMmapGranularity();
|
||||
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
|
||||
uptr premap_shadow_size = PremapShadowSize();
|
||||
uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
|
||||
// We may have mapped too much. Release extra memory.
|
||||
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
|
||||
return shadow_start;
|
||||
}
|
||||
#endif
|
||||
|
||||
uptr FindDynamicShadowStart() {
  UNREACHABLE("FindDynamicShadowStart is not available");
  return 0;
#if ASAN_PREMAP_SHADOW
  if (!PremapShadowFailed())
    return FindPremappedShadowStart();
#endif

  uptr granularity = GetMmapGranularity();
  uptr alignment = granularity * 8;
  uptr left_padding = granularity;
  uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
  uptr map_size = shadow_size + left_padding + alignment;

  uptr map_start = (uptr)MmapNoAccess(map_size);
  CHECK_NE(map_start, ~(uptr)0);

  uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
  UnmapFromTo(map_start, shadow_start - left_padding);
  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);

  return shadow_start;
}

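The reserve-then-trim pattern above is easy to sanity-check with concrete numbers; a minimal sketch assuming a 4 KiB mmap granularity and a hypothetical reservation address (neither taken from the source):

  // Alignment is 8 * 4 KiB = 32 KiB; the start is rounded up inside the
  // over-sized PROT_NONE reservation and the slack on both sides is unmapped.
  constexpr unsigned long long RoundUpToSketch(unsigned long long x,
                                               unsigned long long boundary) {
    return (x + boundary - 1) / boundary * boundary;
  }
  static_assert(RoundUpToSketch(0x7f0000001000ULL + 0x1000, 0x8000) ==
                    0x7f0000008000ULL,
                "shadow start lands on the next 32 KiB boundary inside the reservation");
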
||||
|
||||
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
|
||||
@ -95,6 +144,9 @@ void AsanCheckIncompatibleRT() {}
|
||||
#else
|
||||
static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
|
||||
void *data) {
|
||||
VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n",
|
||||
info->dlpi_name, info->dlpi_addr);
|
||||
|
||||
// Continue until the first dynamic library is found
|
||||
if (!info->dlpi_name || info->dlpi_name[0] == 0)
|
||||
return 0;
|
||||
@ -103,7 +155,7 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
|
||||
if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
|
||||
return 0;
|
||||
|
||||
#if SANITIZER_NETBSD
|
||||
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
// Ignore first entry (the main program)
|
||||
char **p = (char **)data;
|
||||
if (!(*p)) {
|
||||
@ -112,6 +164,12 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
|
||||
}
|
||||
#endif
|
||||
|
||||
#if SANITIZER_SOLARIS
|
||||
// Ignore executable on Solaris
|
||||
if (info->dlpi_addr == 0)
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
*(const char **)data = info->dlpi_name;
|
||||
return 1;
|
||||
}
|
||||
@ -155,7 +213,7 @@ void AsanCheckIncompatibleRT() {
|
||||
// the functions in dynamic ASan runtime instead of the functions in
|
||||
// system libraries, causing crashes later in ASan initialization.
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
char filename[128];
|
||||
char filename[PATH_MAX];
|
||||
MemoryMappedSegment segment(filename, sizeof(filename));
|
||||
while (proc_maps.Next(&segment)) {
|
||||
if (IsDynamicRTName(segment.filename)) {
|
||||
@ -190,4 +248,5 @@ void *AsanDlSymNext(const char *sym) {
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
|
||||
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
|
||||
// SANITIZER_SOLARIS
|
||||
|
@ -1 +0,0 @@
|
||||
|
@ -60,16 +60,36 @@ uptr FindDynamicShadowStart() {
|
||||
uptr space_size = kHighShadowEnd + left_padding;
|
||||
|
||||
uptr largest_gap_found = 0;
|
||||
uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
|
||||
granularity, &largest_gap_found);
|
||||
uptr max_occupied_addr = 0;
|
||||
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
|
||||
uptr shadow_start =
|
||||
FindAvailableMemoryRange(space_size, alignment, granularity,
|
||||
&largest_gap_found, &max_occupied_addr);
|
||||
// If the shadow doesn't fit, restrict the address space to make it fit.
|
||||
if (shadow_start == 0) {
|
||||
VReport(
|
||||
2,
|
||||
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
|
||||
largest_gap_found, max_occupied_addr);
|
||||
uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
|
||||
if (new_max_vm < max_occupied_addr) {
|
||||
Report("Unable to find a memory range for dynamic shadow.\n");
|
||||
Report(
|
||||
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
|
||||
"new_max_vm = %p\n",
|
||||
space_size, largest_gap_found, max_occupied_addr, new_max_vm);
|
||||
CHECK(0 && "cannot place shadow");
|
||||
}
|
||||
RestrictMemoryToMaxAddress(new_max_vm);
|
||||
kHighMemEnd = new_max_vm - 1;
|
||||
space_size = kHighShadowEnd + left_padding;
|
||||
shadow_start =
|
||||
FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
|
||||
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
|
||||
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
|
||||
nullptr, nullptr);
|
||||
if (shadow_start == 0) {
|
||||
Report("Unable to find a memory range after restricting VM.\n");
|
||||
CHECK(0 && "cannot place shadow after restricting vm");
|
||||
}
|
||||
}
|
||||
CHECK_NE((uptr)0, shadow_start);
|
||||
CHECK(IsAligned(shadow_start, alignment));
|
||||
|
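The retry logic in the hunk above follows from the mapping ratio: with SHADOW_SCALE == 3 the shadow needs one byte per 8 bytes of application memory, so if the largest free gap can hold only largest_gap_found bytes of shadow, the application space it can describe is at most largest_gap_found << 3; rounding that down to the reservation alignment gives new_max_vm, and kHighMemEnd is pulled back to new_max_vm - 1 before the search is repeated.
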
@ -14,19 +14,23 @@
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
|
||||
SANITIZER_NETBSD
|
||||
SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS
|
||||
|
||||
#include "sanitizer_common/sanitizer_allocator_checks.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_tls_get_addr.h"
|
||||
#include "asan_allocator.h"
|
||||
#include "asan_interceptors.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_malloc_local.h"
|
||||
#include "asan_stack.h"
|
||||
|
||||
// ---------------------- Replacement functions ---------------- {{{1
|
||||
using namespace __asan; // NOLINT
|
||||
|
||||
static uptr allocated_for_dlsym;
|
||||
static const uptr kDlsymAllocPoolSize = 1024;
|
||||
static uptr last_dlsym_alloc_size_in_words;
|
||||
static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
|
||||
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
|
||||
|
||||
static INLINE bool IsInDlsymAllocPool(const void *ptr) {
|
||||
@ -37,21 +41,73 @@ static INLINE bool IsInDlsymAllocPool(const void *ptr) {
|
||||
static void *AllocateFromLocalPool(uptr size_in_bytes) {
|
||||
uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
|
||||
void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym];
|
||||
last_dlsym_alloc_size_in_words = size_in_words;
|
||||
allocated_for_dlsym += size_in_words;
|
||||
CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
|
||||
return mem;
|
||||
}
|
||||
|
||||
static void DeallocateFromLocalPool(const void *ptr) {
  // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
  // error messages and instead uses malloc followed by free. To avoid pool
  // exhaustion due to long object filenames, handle that special case here.
  uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
  void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset];
  if (prev_mem == ptr) {
    REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
    allocated_for_dlsym = prev_offset;
    last_dlsym_alloc_size_in_words = 0;
  }
}

static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
                                      uptr size_in_bytes) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
    return errno_EINVAL;

  CHECK(alignment >= kWordSize);

  uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
  uptr aligned_addr = RoundUpTo(addr, alignment);
  uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);

  uptr *end_mem = (uptr*)(aligned_addr + aligned_size);
  uptr allocated = end_mem - alloc_memory_for_dlsym;
  if (allocated >= kDlsymAllocPoolSize)
    return errno_ENOMEM;

  allocated_for_dlsym = allocated;
  *memptr = (void*)aligned_addr;
  return 0;
}

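Both helpers above treat alloc_memory_for_dlsym as a plain bump allocator over a static word array; only the most recent allocation can ever be returned to the pool. A worked example of the aligned path, with hypothetical numbers: if allocated_for_dlsym is 10 on a 64-bit target, addr points 80 bytes into the array; a posix_memalign request for alignment 64 and size 40 rounds addr up to the next 64-byte boundary, rounds the size up to a multiple of kWordSize, and advances allocated_for_dlsym to the word index just past that block, failing with errno_ENOMEM only once that index reaches kDlsymAllocPoolSize.
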
#if SANITIZER_RTEMS
|
||||
void* MemalignFromLocalPool(uptr alignment, uptr size) {
|
||||
void *ptr = nullptr;
|
||||
alignment = Max(alignment, kWordSize);
|
||||
PosixMemalignFromLocalPool(&ptr, alignment, size);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
bool IsFromLocalPool(const void *ptr) {
|
||||
return IsInDlsymAllocPool(ptr);
|
||||
}
|
||||
#endif
|
||||
|
||||
static INLINE bool MaybeInDlsym() {
|
||||
// Fuchsia doesn't use dlsym-based interceptors.
|
||||
return !SANITIZER_FUCHSIA && asan_init_is_running;
|
||||
}
|
||||
|
||||
static INLINE bool UseLocalPool() {
|
||||
return EarlyMalloc() || MaybeInDlsym();
|
||||
}
|
||||
|
||||
static void *ReallocFromLocalPool(void *ptr, uptr size) {
|
||||
const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
|
||||
const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
|
||||
void *new_ptr;
|
||||
if (UNLIKELY(MaybeInDlsym())) {
|
||||
if (UNLIKELY(UseLocalPool())) {
|
||||
new_ptr = AllocateFromLocalPool(size);
|
||||
} else {
|
||||
ENSURE_ASAN_INITED();
|
||||
@ -64,8 +120,10 @@ static void *ReallocFromLocalPool(void *ptr, uptr size) {
|
||||
|
||||
INTERCEPTOR(void, free, void *ptr) {
|
||||
GET_STACK_TRACE_FREE;
|
||||
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
|
||||
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
|
||||
DeallocateFromLocalPool(ptr);
|
||||
return;
|
||||
}
|
||||
asan_free(ptr, &stack, FROM_MALLOC);
|
||||
}
|
||||
|
||||
@ -79,7 +137,7 @@ INTERCEPTOR(void, cfree, void *ptr) {
|
||||
#endif // SANITIZER_INTERCEPT_CFREE
|
||||
|
||||
INTERCEPTOR(void*, malloc, uptr size) {
|
||||
if (UNLIKELY(MaybeInDlsym()))
|
||||
if (UNLIKELY(UseLocalPool()))
|
||||
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
|
||||
return AllocateFromLocalPool(size);
|
||||
ENSURE_ASAN_INITED();
|
||||
@ -88,7 +146,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
|
||||
}
|
||||
|
||||
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
|
||||
if (UNLIKELY(MaybeInDlsym()))
|
||||
if (UNLIKELY(UseLocalPool()))
|
||||
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
|
||||
return AllocateFromLocalPool(nmemb * size);
|
||||
ENSURE_ASAN_INITED();
|
||||
@ -99,7 +157,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
|
||||
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
|
||||
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
|
||||
return ReallocFromLocalPool(ptr, size);
|
||||
if (UNLIKELY(MaybeInDlsym()))
|
||||
if (UNLIKELY(UseLocalPool()))
|
||||
return AllocateFromLocalPool(size);
|
||||
ENSURE_ASAN_INITED();
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
@ -120,10 +178,12 @@ INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
|
||||
}
|
||||
#endif // SANITIZER_INTERCEPT_MEMALIGN
|
||||
|
||||
#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
|
||||
INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
|
||||
return asan_aligned_alloc(boundary, size, &stack);
|
||||
}
|
||||
#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC
|
||||
|
||||
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
|
||||
GET_CURRENT_PC_BP_SP;
|
||||
@ -152,8 +212,9 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
|
||||
#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
|
||||
|
||||
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
|
||||
if (UNLIKELY(UseLocalPool()))
|
||||
return PosixMemalignFromLocalPool(memptr, alignment, size);
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
// Printf("posix_memalign: %zx %zu\n", alignment, size);
|
||||
return asan_posix_memalign(memptr, alignment, size, &stack);
|
||||
}
|
||||
|
||||
@ -234,4 +295,4 @@ void ReplaceSystemMalloc() {
|
||||
#endif // SANITIZER_ANDROID
|
||||
|
||||
#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
|
||||
// SANITIZER_NETBSD
|
||||
// SANITIZER_NETBSD || SANITIZER_SOLARIS
|
||||
|
libsanitizer/asan/asan_malloc_local.h (new file, 42 lines)
@ -0,0 +1,42 @@
|
||||
//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||
//
|
||||
// Provide interfaces to check for and handle local pool memory allocation.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef ASAN_MALLOC_LOCAL_H
|
||||
#define ASAN_MALLOC_LOCAL_H
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#include "asan_internal.h"
|
||||
|
||||
// On RTEMS, we use the local pool to handle memory allocation when the ASan
// run-time is not up.
static INLINE bool EarlyMalloc() {
  return SANITIZER_RTEMS && (!__asan::asan_inited ||
                             __asan::asan_init_is_running);
}

void* MemalignFromLocalPool(uptr alignment, uptr size);
|
||||
|
||||
#if SANITIZER_RTEMS
|
||||
|
||||
bool IsFromLocalPool(const void *ptr);
|
||||
|
||||
#define ALLOCATE_FROM_LOCAL_POOL UNLIKELY(EarlyMalloc())
|
||||
#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr))
|
||||
|
||||
#else // SANITIZER_RTEMS
|
||||
|
||||
#define ALLOCATE_FROM_LOCAL_POOL 0
|
||||
#define IS_FROM_LOCAL_POOL(ptr) 0
|
||||
|
||||
#endif // SANITIZER_RTEMS
|
||||
|
||||
#endif // ASAN_MALLOC_LOCAL_H
|
@ -36,6 +36,9 @@ using namespace __asan;
|
||||
#define COMMON_MALLOC_CALLOC(count, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_calloc(count, size, &stack);
|
||||
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
int res = asan_posix_memalign(memptr, alignment, size, &stack);
|
||||
#define COMMON_MALLOC_VALLOC(size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
|
||||
|
@ -12,8 +12,17 @@
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#if SANITIZER_WINDOWS
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
// Intentionally not including windows.h here, to avoid the risk of
|
||||
// pulling in conflicting declarations of these functions. (With mingw-w64,
|
||||
// there's a risk of windows.h pulling in stdint.h.)
|
||||
typedef int BOOL;
|
||||
typedef void *HANDLE;
|
||||
typedef const void *LPCVOID;
|
||||
typedef void *LPVOID;
|
||||
|
||||
#define HEAP_ZERO_MEMORY 0x00000008
|
||||
#define HEAP_REALLOC_IN_PLACE_ONLY 0x00000010
|
||||
|
||||
|
||||
#include "asan_allocator.h"
|
||||
#include "asan_interceptors.h"
|
||||
@ -123,7 +132,7 @@ void *_recalloc_base(void *p, size_t n, size_t elem_size) {
|
||||
}
|
||||
|
||||
ALLOCATION_FUNCTION_ATTRIBUTE
|
||||
size_t _msize(const void *ptr) {
|
||||
size_t _msize(void *ptr) {
|
||||
GET_CURRENT_PC_BP_SP;
|
||||
(void)sp;
|
||||
return asan_malloc_usable_size(ptr, pc, bp);
|
||||
|
@ -120,6 +120,13 @@
|
||||
// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
|
||||
// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
|
||||
//
|
||||
// Shadow mapping on NetBSD/i386 with SHADOW_OFFSET == 0x40000000:
|
||||
// || `[0x60000000, 0xfffff000]` || HighMem ||
|
||||
// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
|
||||
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
|
||||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
|
||||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
|
||||
//
|
||||
// Default Windows/i386 mapping:
|
||||
// (the exact location of HighShadow/HighMem may vary depending
|
||||
// on WoW64, /LARGEADDRESSAWARE, etc).
|
||||
@ -128,12 +135,23 @@
|
||||
// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
|
||||
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
|
||||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
|
||||
//
|
||||
// Shadow mapping on Myriad2 (for shadow scale 5):
|
||||
// || `[0x9ff80000, 0x9fffffff]` || ShadowGap ||
|
||||
// || `[0x9f000000, 0x9ff7ffff]` || LowShadow ||
|
||||
// || `[0x80000000, 0x9effffff]` || LowMem ||
|
||||
// || `[0x00000000, 0x7fffffff]` || Ignored ||
|
||||
|
||||
static const u64 kDefaultShadowScale = 3;
#if defined(ASAN_SHADOW_SCALE)
static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
#else
static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3;
#endif
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;  // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000;  // < 2G.
static const u64 kDefaultShort64bitShadowOffset =
    0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale);  // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30;  // 0x40000000
static const u64 kIosShadowOffset64 = 0x120200000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
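To confirm that the new expression reproduces the old constant and scales with SHADOW_SCALE, a small standalone check (hypothetical helper, not in the source):

  // 0x7FFFFFFF & (~0xFFFULL << scale), exactly as defined above.
  constexpr unsigned long long ShortShadowOffset(unsigned scale) {
    return 0x7FFFFFFFULL & (~0xFFFULL << scale);
  }
  static_assert(ShortShadowOffset(3) == 0x7FFF8000ULL,
                "default scale: identical to the old hard-coded value");
  static_assert(ShortShadowOffset(5) == 0x7FFE0000ULL,
                "Myriad scale-5 variant, still below 2 GiB");
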
@ -141,24 +159,36 @@ static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
|
||||
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
|
||||
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
|
||||
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
|
||||
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
|
||||
static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
|
||||
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
|
||||
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
|
||||
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
|
||||
static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
|
||||
static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
|
||||
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
|
||||
|
||||
static const u64 kMyriadMemoryOffset32 = 0x80000000ULL;
|
||||
static const u64 kMyriadMemorySize32 = 0x20000000ULL;
|
||||
static const u64 kMyriadMemoryEnd32 =
|
||||
kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1;
|
||||
static const u64 kMyriadShadowOffset32 =
|
||||
(kMyriadMemoryOffset32 + kMyriadMemorySize32 -
|
||||
(kMyriadMemorySize32 >> kDefaultShadowScale));
|
||||
static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
|
||||
|
||||
#define SHADOW_SCALE kDefaultShadowScale
|
||||
|
||||
#if SANITIZER_FUCHSIA
|
||||
# define SHADOW_OFFSET (0)
|
||||
#elif SANITIZER_WORDSIZE == 32
|
||||
# if SANITIZER_ANDROID
|
||||
# define SHADOW_OFFSET (0)
|
||||
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
|
||||
# elif defined(__mips__)
|
||||
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
|
||||
# elif SANITIZER_FREEBSD
|
||||
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
|
||||
# elif SANITIZER_NETBSD
|
||||
# define SHADOW_OFFSET kNetBSD_ShadowOffset32
|
||||
# elif SANITIZER_WINDOWS
|
||||
# define SHADOW_OFFSET kWindowsShadowOffset32
|
||||
# elif SANITIZER_IOS
|
||||
@ -167,6 +197,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
|
||||
# else
|
||||
# define SHADOW_OFFSET kIosShadowOffset32
|
||||
# endif
|
||||
# elif SANITIZER_MYRIAD2
|
||||
# define SHADOW_OFFSET kMyriadShadowOffset32
|
||||
# else
|
||||
# define SHADOW_OFFSET kDefaultShadowOffset32
|
||||
# endif
|
||||
@ -198,7 +230,46 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if SANITIZER_ANDROID && defined(__arm__)
|
||||
# define ASAN_PREMAP_SHADOW 1
|
||||
#else
|
||||
# define ASAN_PREMAP_SHADOW 0
|
||||
#endif
|
||||
|
||||
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
|
||||
|
||||
#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
|
||||
|
||||
#if DO_ASAN_MAPPING_PROFILE
|
||||
# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
|
||||
#else
|
||||
# define PROFILE_ASAN_MAPPING()
|
||||
#endif
|
||||
|
||||
// If 1, all shadow boundaries are constants.
|
||||
// Don't set to 1 other than for testing.
|
||||
#define ASAN_FIXED_MAPPING 0
|
||||
|
||||
namespace __asan {
|
||||
|
||||
extern uptr AsanMappingProfile[];
|
||||
|
||||
#if ASAN_FIXED_MAPPING
|
||||
// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
|
||||
// with non-fixed mapping. As of r175253 (Feb 2013) the performance
|
||||
// difference between fixed and non-fixed mapping is below the noise level.
|
||||
static uptr kHighMemEnd = 0x7fffffffffffULL;
|
||||
static uptr kMidMemBeg = 0x3000000000ULL;
|
||||
static uptr kMidMemEnd = 0x4fffffffffULL;
|
||||
#else
|
||||
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
|
||||
#endif
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
#if SANITIZER_MYRIAD2
#include "asan_mapping_myriad.h"
#else
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))

#define kLowMemBeg 0
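The macro above is the entire address translation: shift right by SHADOW_SCALE and add the offset. A minimal sketch assuming the default 64-bit configuration from earlier in this file (SHADOW_SCALE == 3, short offset 0x7FFF8000), not a definitive restatement of every platform:

  constexpr unsigned long long kShortOffset64 = 0x7FFF8000ULL;
  constexpr unsigned long long MemToShadowSketch(unsigned long long mem) {
    return (mem >> 3) + kShortOffset64;
  }
  static_assert(MemToShadowSketch(0) == 0x7FFF8000ULL,
                "address 0 maps to the first LowShadow byte");
  // Each shadow byte describes 8 application bytes, so consecutive 8-byte
  // granules map to consecutive shadow bytes.
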
@ -230,36 +301,11 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
|
||||
#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
|
||||
#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
|
||||
|
||||
#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
|
||||
|
||||
#if DO_ASAN_MAPPING_PROFILE
|
||||
# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
|
||||
#else
|
||||
# define PROFILE_ASAN_MAPPING()
|
||||
#endif
|
||||
|
||||
// If 1, all shadow boundaries are constants.
|
||||
// Don't set to 1 other than for testing.
|
||||
#define ASAN_FIXED_MAPPING 0
|
||||
|
||||
namespace __asan {
|
||||
|
||||
extern uptr AsanMappingProfile[];
|
||||
|
||||
#if ASAN_FIXED_MAPPING
|
||||
// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
|
||||
// with non-fixed mapping. As of r175253 (Feb 2013) the performance
|
||||
// difference between fixed and non-fixed mapping is below the noise level.
|
||||
static uptr kHighMemEnd = 0x7fffffffffffULL;
|
||||
static uptr kMidMemBeg = 0x3000000000ULL;
|
||||
static uptr kMidMemEnd = 0x4fffffffffULL;
|
||||
#else
|
||||
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
|
||||
#endif
|
||||
|
||||
static inline bool AddrIsInLowMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return a < kLowMemEnd;
|
||||
return a <= kLowMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInLowShadow(uptr a) {
|
||||
@ -267,16 +313,26 @@ static inline bool AddrIsInLowShadow(uptr a) {
|
||||
return a >= kLowShadowBeg && a <= kLowShadowEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInHighMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return a >= kHighMemBeg && a <= kHighMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInMidMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInMidShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInHighMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInHighShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInShadowGap(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
if (kMidMemBeg) {
|
||||
@ -292,6 +348,12 @@ static inline bool AddrIsInShadowGap(uptr a) {
|
||||
return a >= kShadowGapBeg && a <= kShadowGapEnd;
|
||||
}
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
#endif // SANITIZER_MYRIAD2
|
||||
|
||||
namespace __asan {
|
||||
|
||||
static inline bool AddrIsInMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
|
||||
@ -304,16 +366,6 @@ static inline uptr MemToShadow(uptr p) {
|
||||
return MEM_TO_SHADOW(p);
|
||||
}
|
||||
|
||||
static inline bool AddrIsInHighShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return a >= kHighShadowBeg && a <= kHighMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInMidShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return kMidMemBeg && a >= kMidShadowBeg && a <= kMidMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
|
||||
@ -326,6 +378,8 @@ static inline bool AddrIsAlignedByGranularity(uptr a) {
|
||||
|
||||
static inline bool AddressIsPoisoned(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a))
|
||||
return false;
|
||||
const uptr kAccessSize = 1;
|
||||
u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
|
||||
s8 shadow_value = *shadow_address;
|
||||
|
libsanitizer/asan/asan_mapping_myriad.h (new file, 84 lines)
@ -0,0 +1,84 @@
|
||||
//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||
//
|
||||
// Myriad-specific definitions for ASan memory mapping.
|
||||
//===----------------------------------------------------------------------===//
|
||||
#ifndef ASAN_MAPPING_MYRIAD_H
|
||||
#define ASAN_MAPPING_MYRIAD_H
|
||||
|
||||
#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32)
#define MEM_TO_SHADOW(mem) \
  (((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET))

#define kLowMemBeg kMyriadMemoryOffset32
#define kLowMemEnd (SHADOW_OFFSET - 1)

#define kLowShadowBeg SHADOW_OFFSET
#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)

#define kHighMemBeg 0
|
||||
#define kHighShadowBeg 0
|
||||
#define kHighShadowEnd 0
|
||||
|
||||
#define kMidShadowBeg 0
|
||||
#define kMidShadowEnd 0
|
||||
|
||||
#define kShadowGapBeg (kLowShadowEnd + 1)
|
||||
#define kShadowGapEnd kMyriadMemoryEnd32
|
||||
|
||||
#define kShadowGap2Beg 0
|
||||
#define kShadowGap2End 0
|
||||
|
||||
#define kShadowGap3Beg 0
|
||||
#define kShadowGap3End 0
|
||||
|
||||
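Plugging in the Myriad constants defined earlier in this merge makes the mapping above concrete: kMyriadShadowOffset32 = kMyriadMemoryOffset32 + kMyriadMemorySize32 - (kMyriadMemorySize32 >> 5) = 0x80000000 + 0x20000000 - 0x01000000 = 0x9F000000, so SHADOW_OFFSET and kLowShadowBeg are 0x9F000000, kLowMemEnd is 0x9EFFFFFF, and kLowShadowEnd = ((0x9EFFFFFF - 0x80000000) >> 5) + 0x9F000000 = 0x9FF7FFFF, matching the [0x9f000000, 0x9ff7ffff] LowShadow range in the mapping comment added to asan_mapping.h above.
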
namespace __asan {
|
||||
|
||||
static inline bool AddrIsInLowMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
a = RAW_ADDR(a);
|
||||
return a >= kLowMemBeg && a <= kLowMemEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInLowShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
a = RAW_ADDR(a);
|
||||
return a >= kLowShadowBeg && a <= kLowShadowEnd;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInMidMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInMidShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInHighMem(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInHighShadow(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool AddrIsInShadowGap(uptr a) {
|
||||
PROFILE_ASAN_MAPPING();
|
||||
a = RAW_ADDR(a);
|
||||
return a >= kShadowGapBeg && a <= kShadowGapEnd;
|
||||
}
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
#endif // ASAN_MAPPING_MYRIAD_H
|
@ -29,9 +29,9 @@ struct AllocationSite {
|
||||
|
||||
class HeapProfile {
|
||||
public:
|
||||
HeapProfile() : allocations_(1024) {}
|
||||
HeapProfile() { allocations_.reserve(1024); }
|
||||
|
||||
void ProcessChunk(const AsanChunkView& cv) {
|
||||
void ProcessChunk(const AsanChunkView &cv) {
|
||||
if (cv.IsAllocated()) {
|
||||
total_allocated_user_size_ += cv.UsedSize();
|
||||
total_allocated_count_++;
|
||||
@ -47,10 +47,10 @@ class HeapProfile {
|
||||
}
|
||||
|
||||
void Print(uptr top_percent, uptr max_number_of_contexts) {
|
||||
InternalSort(&allocations_, allocations_.size(),
|
||||
[](const AllocationSite &a, const AllocationSite &b) {
|
||||
return a.total_size > b.total_size;
|
||||
});
|
||||
Sort(allocations_.data(), allocations_.size(),
|
||||
[](const AllocationSite &a, const AllocationSite &b) {
|
||||
return a.total_size > b.total_size;
|
||||
});
|
||||
CHECK(total_allocated_user_size_);
|
||||
uptr total_shown = 0;
|
||||
Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
|
||||
|
@ -12,6 +12,8 @@
|
||||
|
||||
#include "asan_allocator.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_malloc_local.h"
|
||||
#include "asan_report.h"
|
||||
#include "asan_stack.h"
|
||||
|
||||
#include "interception/interception.h"
|
||||
@ -22,7 +24,7 @@
|
||||
// anyway by passing extra -export flags to the linker, which is exactly that
|
||||
// dllexport would normally do. We need to export them in order to make the
|
||||
// VS2015 dynamic CRT (MD) work.
|
||||
#if SANITIZER_WINDOWS
|
||||
#if SANITIZER_WINDOWS && defined(_MSC_VER)
|
||||
#define CXX_OPERATOR_ATTRIBUTE
|
||||
#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym))
|
||||
#ifdef _WIN64
|
||||
@ -65,16 +67,28 @@ struct nothrow_t {};
|
||||
enum class align_val_t: size_t {};
|
||||
} // namespace std
|
||||
|
||||
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM.
// For local pool allocation, align to SHADOW_GRANULARITY to match asan
// allocator behavior.
#define OPERATOR_NEW_BODY(type, nothrow) \
  if (ALLOCATE_FROM_LOCAL_POOL) {\
    void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size);\
    if (!nothrow) CHECK(res);\
    return res;\
  }\
  GET_STACK_TRACE_MALLOC;\
  void *res = asan_memalign(0, size, &stack, type);\
  if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
  if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
  return res;
#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
  if (ALLOCATE_FROM_LOCAL_POOL) {\
    void *res = MemalignFromLocalPool((uptr)align, size);\
    if (!nothrow) CHECK(res);\
    return res;\
  }\
  GET_STACK_TRACE_MALLOC;\
  void *res = asan_memalign((uptr)align, size, &stack, type);\
  if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
  if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
  return res;
||||
|
||||
// On OS X it's not enough to just provide our own 'operator new' and
|
||||
@ -123,77 +137,73 @@ INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
|
||||
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
|
||||
OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
|
||||
}
|
||||
#endif
|
||||
#endif // !SANITIZER_MAC
|
||||
|
||||
#define OPERATOR_DELETE_BODY(type) \
|
||||
if (IS_FROM_LOCAL_POOL(ptr)) return;\
|
||||
GET_STACK_TRACE_FREE;\
|
||||
asan_free(ptr, &stack, type);
|
||||
asan_delete(ptr, 0, 0, &stack, type);
|
||||
|
||||
#define OPERATOR_DELETE_BODY_SIZE(type) \
|
||||
if (IS_FROM_LOCAL_POOL(ptr)) return;\
|
||||
GET_STACK_TRACE_FREE;\
|
||||
asan_delete(ptr, size, 0, &stack, type);
|
||||
|
||||
#define OPERATOR_DELETE_BODY_ALIGN(type) \
|
||||
if (IS_FROM_LOCAL_POOL(ptr)) return;\
|
||||
GET_STACK_TRACE_FREE;\
|
||||
asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
|
||||
|
||||
#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
|
||||
if (IS_FROM_LOCAL_POOL(ptr)) return;\
|
||||
GET_STACK_TRACE_FREE;\
|
||||
asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
|
||||
|
||||
#if !SANITIZER_MAC
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete(void *ptr) NOEXCEPT {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW);
|
||||
}
|
||||
void operator delete(void *ptr) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete[](void *ptr) NOEXCEPT {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW_BR);
|
||||
}
|
||||
void operator delete[](void *ptr) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete(void *ptr, std::nothrow_t const&) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW);
|
||||
}
|
||||
void operator delete(void *ptr, std::nothrow_t const&)
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete[](void *ptr, std::nothrow_t const&) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW_BR);
|
||||
}
|
||||
void operator delete[](void *ptr, std::nothrow_t const&)
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete(void *ptr, size_t size) NOEXCEPT {
|
||||
GET_STACK_TRACE_FREE;
|
||||
asan_sized_free(ptr, size, &stack, FROM_NEW);
|
||||
}
|
||||
void operator delete(void *ptr, size_t size) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete[](void *ptr, size_t size) NOEXCEPT {
|
||||
GET_STACK_TRACE_FREE;
|
||||
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
|
||||
}
|
||||
void operator delete[](void *ptr, size_t size) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete(void *ptr, std::align_val_t) NOEXCEPT {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW);
|
||||
}
|
||||
void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete[](void *ptr, std::align_val_t) NOEXCEPT {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW_BR);
|
||||
}
|
||||
void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW);
|
||||
}
|
||||
void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
|
||||
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW_BR);
|
||||
}
|
||||
void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
|
||||
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT {
|
||||
GET_STACK_TRACE_FREE;
|
||||
asan_sized_free(ptr, size, &stack, FROM_NEW);
|
||||
}
|
||||
void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); }
|
||||
CXX_OPERATOR_ATTRIBUTE
|
||||
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT {
|
||||
GET_STACK_TRACE_FREE;
|
||||
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
|
||||
}
|
||||
void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
|
||||
{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); }
|
||||
|
||||
#else // SANITIZER_MAC
|
||||
INTERCEPTOR(void, _ZdlPv, void *ptr) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW);
|
||||
}
|
||||
INTERCEPTOR(void, _ZdaPv, void *ptr) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW_BR);
|
||||
}
|
||||
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW);
|
||||
}
|
||||
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
|
||||
OPERATOR_DELETE_BODY(FROM_NEW_BR);
|
||||
}
|
||||
#endif
|
||||
INTERCEPTOR(void, _ZdlPv, void *ptr)
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW); }
|
||||
INTERCEPTOR(void, _ZdaPv, void *ptr)
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
|
||||
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW); }
|
||||
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
|
||||
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
|
||||
#endif // !SANITIZER_MAC
|
||||
|
@ -30,7 +30,7 @@ bool CanPoisonMemory() {
|
||||
}
|
||||
|
||||
void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!CanPoisonMemory()) return;
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
@ -180,8 +180,15 @@ int __asan_address_is_poisoned(void const volatile *addr) {
|
||||
uptr __asan_region_is_poisoned(uptr beg, uptr size) {
|
||||
if (!size) return 0;
|
||||
uptr end = beg + size;
|
||||
if (!AddrIsInMem(beg)) return beg;
|
||||
if (!AddrIsInMem(end)) return end;
|
||||
if (SANITIZER_MYRIAD2) {
|
||||
// On Myriad, address not in DRAM range need to be treated as
|
||||
// unpoisoned.
|
||||
if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
|
||||
if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
|
||||
} else {
|
||||
if (!AddrIsInMem(beg)) return beg;
|
||||
if (!AddrIsInMem(end)) return end;
|
||||
}
|
||||
CHECK_LT(beg, end);
|
||||
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
|
||||
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
|
||||
|
@ -36,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
|
||||
// performance-critical code with care.
|
||||
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
|
||||
u8 value) {
|
||||
DCHECK(CanPoisonMemory());
|
||||
DCHECK(!value || CanPoisonMemory());
|
||||
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
|
||||
uptr shadow_end = MEM_TO_SHADOW(
|
||||
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
|
||||
@ -49,6 +49,9 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
|
||||
// changed at all. It doesn't currently have an efficient means
|
||||
// to zero a bunch of pages, but maybe we should add one.
|
||||
SANITIZER_FUCHSIA == 1 ||
|
||||
// RTEMS doesn't have pages, let alone a fast way to zero
|
||||
// them, so default to memset.
|
||||
SANITIZER_RTEMS == 1 ||
|
||||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
|
||||
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
|
||||
} else {
|
||||
|
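The memset-vs-mmap decision above operates on shadow addresses computed by MEM_TO_SHADOW. As a rough standalone illustration of that arithmetic, assuming the typical x86-64 Linux parameters (SHADOW_SCALE = 3, offset 0x7fff8000); the real constants come from asan_mapping.h and vary by platform:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kShadowScale  = 3;            // 8 bytes of app memory per shadow byte
constexpr uintptr_t kShadowOffset = 0x7fff8000;   // illustrative x86-64 Linux value
constexpr uintptr_t kGranularity  = 1 << kShadowScale;

constexpr uintptr_t MemToShadow(uintptr_t addr) {
  return (addr >> kShadowScale) + kShadowOffset;
}

int main() {
  // Hypothetical 64-byte, granule-aligned region.
  uintptr_t aligned_beg = 0x602000, aligned_size = 64;
  uintptr_t shadow_beg = MemToShadow(aligned_beg);
  // Same formula as FastPoisonShadow: shadow byte of the last granule, plus one.
  uintptr_t shadow_end = MemToShadow(aligned_beg + aligned_size - kGranularity) + 1;
  printf("shadow bytes to poison: %zu\n", (size_t)(shadow_end - shadow_beg));  // 8
  return 0;
}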
@ -23,7 +23,6 @@
|
||||
#include "sanitizer_common/sanitizer_procmaps.h"
|
||||
|
||||
#include <pthread.h>
|
||||
#include <signal.h>
|
||||
#include <stdlib.h>
|
||||
#include <sys/time.h>
|
||||
#include <sys/resource.h>
|
||||
|
libsanitizer/asan/asan_premap_shadow.cc (new file, 77 lines)
@ -0,0 +1,77 @@
//===-- asan_premap_shadow.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Reserve shadow memory with an ifunc resolver.
//===----------------------------------------------------------------------===//

#include "asan_mapping.h"

#if ASAN_PREMAP_SHADOW

#include "asan_premap_shadow.h"
#include "sanitizer_common/sanitizer_posix.h"

namespace __asan {

// The code in this file needs to run in an unrelocated binary. It may not
// access any external symbol, including its own non-hidden globals.

// Conservative upper limit.
uptr PremapShadowSize() {
  uptr granularity = GetMmapGranularity();
  return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
}

// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
  uptr granularity = GetMmapGranularity();
  uptr alignment = granularity * 8;
  uptr left_padding = granularity;
  uptr shadow_size = PremapShadowSize();
  uptr map_size = shadow_size + left_padding + alignment;

  uptr map_start = (uptr)MmapNoAccess(map_size);
  CHECK_NE(map_start, ~(uptr)0);

  uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
  uptr shadow_end = shadow_start + shadow_size;
  internal_munmap(reinterpret_cast<void *>(map_start),
                  shadow_start - left_padding - map_start);
  internal_munmap(reinterpret_cast<void *>(shadow_end),
                  map_start + map_size - shadow_end);
  return shadow_start;
}

bool PremapShadowFailed() {
  uptr shadow = reinterpret_cast<uptr>(&__asan_shadow);
  uptr resolver = reinterpret_cast<uptr>(&__asan_premap_shadow);
  // shadow == resolver is how Android KitKat and older handles ifunc.
  // shadow == 0 just in case.
  if (shadow == 0 || shadow == resolver)
    return true;
  return false;
}
} // namespace __asan

extern "C" {
decltype(__asan_shadow)* __asan_premap_shadow() {
  // The resolver may be called multiple times. Map the shadow just once.
  static uptr premapped_shadow = 0;
  if (!premapped_shadow) premapped_shadow = __asan::PremapShadow();
  return reinterpret_cast<decltype(__asan_shadow)*>(premapped_shadow);
}

// __asan_shadow is a "function" that has the same address as the first byte of
// the shadow mapping.
INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void
    __asan_shadow();
}

#endif // ASAN_PREMAP_SHADOW
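PremapShadow() above gets an aligned reservation out of the OS by over-allocating and then trimming the excess with munmap. A minimal Linux-flavoured sketch of the same pattern using plain POSIX calls (names and sizes are illustrative; the guard page kept via left_padding in the real code is omitted here):

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Reserve `size` bytes of inaccessible address space at `alignment`
// (a power of two) by mapping extra and unmapping the unaligned ends.
static void *ReserveAligned(size_t size, size_t alignment) {
  size_t map_size = size + alignment;               // over-allocate
  void *map = mmap(nullptr, map_size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (map == MAP_FAILED) return nullptr;
  uintptr_t start   = (uintptr_t)map;
  uintptr_t aligned = (start + alignment - 1) & ~(alignment - 1);
  if (aligned > start)                              // trim the unaligned head
    munmap(map, aligned - start);
  uintptr_t end = start + map_size;
  if (end > aligned + size)                         // trim the unused tail
    munmap((void *)(aligned + size), end - (aligned + size));
  return (void *)aligned;
}

int main() {
  void *p = ReserveAligned(1 << 20, 1 << 16);       // 1 MiB at 64 KiB alignment
  printf("reserved at %p\n", p);
  return 0;
}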
libsanitizer/asan/asan_premap_shadow.h (new file, 28 lines)
@ -0,0 +1,28 @@
//===-- asan_premap_shadow.h ------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Premap shadow range with an ifunc resolver.
//===----------------------------------------------------------------------===//

#ifndef ASAN_PREMAP_SHADOW_H
#define ASAN_PREMAP_SHADOW_H

#if ASAN_PREMAP_SHADOW
namespace __asan {
// Conservative upper limit.
uptr PremapShadowSize();
bool PremapShadowFailed();
}
#endif

extern "C" INTERFACE_ATTRIBUTE void __asan_shadow();
extern "C" decltype(__asan_shadow)* __asan_premap_shadow();

#endif // ASAN_PREMAP_SHADOW_H
@ -82,7 +82,7 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
|
||||
bool ParseFrameDescription(const char *frame_descr,
|
||||
InternalMmapVector<StackVarDescr> *vars) {
|
||||
CHECK(frame_descr);
|
||||
char *p;
|
||||
const char *p;
|
||||
// This string is created by the compiler and has the following form:
|
||||
// "n alloc_1 alloc_2 ... alloc_n"
|
||||
// where alloc_i looks like "offset size len ObjectName"
|
||||
@ -132,6 +132,10 @@ class ScopedInErrorReport {
|
||||
}
|
||||
|
||||
~ScopedInErrorReport() {
|
||||
if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
|
||||
asanThreadRegistry().Unlock();
|
||||
return;
|
||||
}
|
||||
ASAN_ON_ERROR();
|
||||
if (current_error_.IsValid()) current_error_.Print();
|
||||
|
||||
@ -150,7 +154,7 @@ class ScopedInErrorReport {
|
||||
|
||||
// Copy the message buffer so that we could start logging without holding a
|
||||
// lock that gets acquired during printing.
|
||||
InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize);
|
||||
InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
|
||||
{
|
||||
BlockingMutexLock l(&error_message_buf_mutex);
|
||||
internal_memcpy(buffer_copy.data(),
|
||||
@ -200,7 +204,7 @@ class ScopedInErrorReport {
|
||||
bool halt_on_error_;
|
||||
};
|
||||
|
||||
ErrorDescription ScopedInErrorReport::current_error_;
|
||||
ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);
|
||||
|
||||
void ReportDeadlySignal(const SignalContext &sig) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
@ -214,11 +218,12 @@ void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
|
||||
void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
|
||||
uptr delete_alignment,
|
||||
BufferedStackTrace *free_stack) {
|
||||
ScopedInErrorReport in_report;
|
||||
ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
|
||||
delete_size);
|
||||
ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
|
||||
delete_size, delete_alignment);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
@ -251,6 +256,62 @@ void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportInvalidAllocationAlignment(uptr alignment,
|
||||
BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
|
||||
alignment);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
|
||||
BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
|
||||
size, alignment);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportInvalidPosixMemalignAlignment(uptr alignment,
|
||||
BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
|
||||
alignment);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
|
||||
BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
|
||||
total_size, max_size);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportRssLimitExceeded(BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
|
||||
ScopedInErrorReport in_report(/*fatal*/ true);
|
||||
ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
|
||||
in_report.ReportError(error);
|
||||
}
|
||||
|
||||
void ReportStringFunctionMemoryRangesOverlap(const char *function,
|
||||
const char *offset1, uptr length1,
|
||||
const char *offset2, uptr length2,
|
||||
|
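ReportCallocOverflow() above is the fatal sink for a check made on the allocation path (the caller is not part of this hunk). The condition being guarded against is ordinary multiplication overflow; a standalone illustration with a made-up helper name:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// True if nmemb * size cannot be represented in a size_t.
static bool CallocWouldOverflow(size_t nmemb, size_t size) {
  if (nmemb == 0 || size == 0) return false;
  return nmemb > SIZE_MAX / size;
}

int main() {
  printf("%d\n", CallocWouldOverflow(1u << 20, 1u << 10));  // 0: 1 GiB fits
  printf("%d\n", CallocWouldOverflow(SIZE_MAX / 2, 4));     // 1: wraps around
  return 0;
}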
@ -10,6 +10,9 @@
|
||||
// ASan-private header for error reporting functions.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef ASAN_REPORT_H
|
||||
#define ASAN_REPORT_H
|
||||
|
||||
#include "asan_allocator.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_thread.h"
|
||||
@ -45,7 +48,8 @@ bool ParseFrameDescription(const char *frame_descr,
|
||||
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
|
||||
uptr access_size, u32 exp, bool fatal);
|
||||
void ReportDeadlySignal(const SignalContext &sig);
|
||||
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
|
||||
void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
|
||||
uptr delete_alignment,
|
||||
BufferedStackTrace *free_stack);
|
||||
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
|
||||
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
|
||||
@ -55,6 +59,18 @@ void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
|
||||
void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
|
||||
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
|
||||
BufferedStackTrace *stack);
|
||||
void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack);
|
||||
void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack);
|
||||
void ReportInvalidAllocationAlignment(uptr alignment,
|
||||
BufferedStackTrace *stack);
|
||||
void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
|
||||
BufferedStackTrace *stack);
|
||||
void ReportInvalidPosixMemalignAlignment(uptr alignment,
|
||||
BufferedStackTrace *stack);
|
||||
void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
|
||||
BufferedStackTrace *stack);
|
||||
void ReportRssLimitExceeded(BufferedStackTrace *stack);
|
||||
void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack);
|
||||
void ReportStringFunctionMemoryRangesOverlap(const char *function,
|
||||
const char *offset1, uptr length1,
|
||||
const char *offset2, uptr length2,
|
||||
@ -77,3 +93,4 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
|
||||
BufferedStackTrace *stack);
|
||||
|
||||
} // namespace __asan
|
||||
#endif // ASAN_REPORT_H
|
||||
|
libsanitizer/asan/asan_rtems.cc (new file, 251 lines)
@ -0,0 +1,251 @@
|
||||
//===-- asan_rtems.cc -----------------------------------------------------===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||
//
|
||||
// RTEMS-specific details.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_common/sanitizer_rtems.h"
|
||||
#if SANITIZER_RTEMS
|
||||
|
||||
#include "asan_internal.h"
|
||||
#include "asan_interceptors.h"
|
||||
#include "asan_mapping.h"
|
||||
#include "asan_poisoning.h"
|
||||
#include "asan_report.h"
|
||||
#include "asan_stack.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
|
||||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
namespace __asan {
|
||||
|
||||
static void ResetShadowMemory() {
|
||||
uptr shadow_start = SHADOW_OFFSET;
|
||||
uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
|
||||
uptr gap_start = MEM_TO_SHADOW(shadow_start);
|
||||
uptr gap_end = MEM_TO_SHADOW(shadow_end);
|
||||
|
||||
REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
|
||||
REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
|
||||
}
|
||||
|
||||
void InitializeShadowMemory() {
|
||||
kHighMemEnd = 0;
|
||||
kMidMemBeg = 0;
|
||||
kMidMemEnd = 0;
|
||||
|
||||
ResetShadowMemory();
|
||||
}
|
||||
|
||||
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
void AsanCheckDynamicRTPrereqs() {}
|
||||
void AsanCheckIncompatibleRT() {}
|
||||
void InitializeAsanInterceptors() {}
|
||||
void InitializePlatformInterceptors() {}
|
||||
void InitializePlatformExceptionHandlers() {}
|
||||
|
||||
// RTEMS only supports static linking; it suffices to return with no
|
||||
// error.
|
||||
void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
|
||||
|
||||
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
void EarlyInit() {
|
||||
// Provide early initialization of shadow memory so that
|
||||
// instrumented code running before full initialization will not
|
||||
// report spurious errors.
|
||||
ResetShadowMemory();
|
||||
}
|
||||
|
||||
// We can use a plain thread_local variable for TSD.
|
||||
static thread_local void *per_thread;
|
||||
|
||||
void *AsanTSDGet() { return per_thread; }
|
||||
|
||||
void AsanTSDSet(void *tsd) { per_thread = tsd; }
|
||||
|
||||
// There's no initialization needed, and the passed-in destructor
|
||||
// will never be called. Instead, our own thread destruction hook
|
||||
// (below) will call AsanThread::TSDDtor directly.
|
||||
void AsanTSDInit(void (*destructor)(void *tsd)) {
|
||||
DCHECK(destructor == &PlatformTSDDtor);
|
||||
}
|
||||
|
||||
void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
|
||||
|
||||
//
|
||||
// Thread registration. We provide an API similar to the Fuchsia port.
|
||||
//
|
||||
|
||||
struct AsanThread::InitOptions {
|
||||
uptr stack_bottom, stack_size, tls_bottom, tls_size;
|
||||
};
|
||||
|
||||
// Shared setup between thread creation and startup for the initial thread.
|
||||
static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
|
||||
uptr user_id, bool detached,
|
||||
uptr stack_bottom, uptr stack_size,
|
||||
uptr tls_bottom, uptr tls_size) {
|
||||
// In lieu of AsanThread::Create.
|
||||
AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
|
||||
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
|
||||
asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
|
||||
|
||||
// On other systems, AsanThread::Init() is called from the new
|
||||
// thread itself. But on RTEMS we already know the stack address
|
||||
// range beforehand, so we can do most of the setup right now.
|
||||
const AsanThread::InitOptions options = {stack_bottom, stack_size,
|
||||
tls_bottom, tls_size};
|
||||
thread->Init(&options);
|
||||
return thread;
|
||||
}
|
||||
|
||||
// This gets the same arguments passed to Init by CreateAsanThread, above.
|
||||
// We're in the creator thread before the new thread is actually started, but
|
||||
// its stack and tls address range are already known.
|
||||
void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
|
||||
DCHECK_NE(GetCurrentThread(), this);
|
||||
DCHECK_NE(GetCurrentThread(), nullptr);
|
||||
CHECK_NE(options->stack_bottom, 0);
|
||||
CHECK_NE(options->stack_size, 0);
|
||||
stack_bottom_ = options->stack_bottom;
|
||||
stack_top_ = options->stack_bottom + options->stack_size;
|
||||
tls_begin_ = options->tls_bottom;
|
||||
tls_end_ = options->tls_bottom + options->tls_size;
|
||||
}
|
||||
|
||||
// Called by __asan::AsanInitInternal (asan_rtl.c). Unlike other ports, the
|
||||
// main thread on RTEMS does not require special treatment; its AsanThread is
|
||||
// already created by the provided hooks. This function simply looks up and
|
||||
// returns the created thread.
|
||||
AsanThread *CreateMainThread() {
|
||||
return GetThreadContextByTidLocked(0)->thread;
|
||||
}
|
||||
|
||||
// This is called before each thread creation is attempted. So, in
|
||||
// its first call, the calling thread is the initial and sole thread.
|
||||
static void *BeforeThreadCreateHook(uptr user_id, bool detached,
|
||||
uptr stack_bottom, uptr stack_size,
|
||||
uptr tls_bottom, uptr tls_size) {
|
||||
EnsureMainThreadIDIsCorrect();
|
||||
// Strict init-order checking is thread-hostile.
|
||||
if (flags()->strict_init_order) StopInitOrderChecking();
|
||||
|
||||
GET_STACK_TRACE_THREAD;
|
||||
u32 parent_tid = GetCurrentTidOrInvalid();
|
||||
|
||||
return CreateAsanThread(&stack, parent_tid, user_id, detached,
|
||||
stack_bottom, stack_size, tls_bottom, tls_size);
|
||||
}
|
||||
|
||||
// This is called after creating a new thread (in the creating thread),
|
||||
// with the pointer returned by BeforeThreadCreateHook (above).
|
||||
static void ThreadCreateHook(void *hook, bool aborted) {
|
||||
AsanThread *thread = static_cast<AsanThread *>(hook);
|
||||
if (!aborted) {
|
||||
// The thread was created successfully.
|
||||
// ThreadStartHook is already running in the new thread.
|
||||
} else {
|
||||
// The thread wasn't created after all.
|
||||
// Clean up everything we set up in BeforeThreadCreateHook.
|
||||
asanThreadRegistry().FinishThread(thread->tid());
|
||||
UnmapOrDie(thread, sizeof(AsanThread));
|
||||
}
|
||||
}
|
||||
|
||||
// This is called (1) in the newly-created thread before it runs anything else,
|
||||
// with the pointer returned by BeforeThreadCreateHook (above). (2) before a
|
||||
// thread restart.
|
||||
static void ThreadStartHook(void *hook, uptr os_id) {
|
||||
if (!hook)
|
||||
return;
|
||||
|
||||
AsanThread *thread = static_cast<AsanThread *>(hook);
|
||||
SetCurrentThread(thread);
|
||||
|
||||
ThreadStatus status =
|
||||
asanThreadRegistry().GetThreadLocked(thread->tid())->status;
|
||||
DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
|
||||
// Determine whether we are starting or restarting the thread.
|
||||
if (status == ThreadStatusCreated)
|
||||
// In lieu of AsanThread::ThreadStart.
|
||||
asanThreadRegistry().StartThread(thread->tid(), os_id,
|
||||
/*workerthread*/ false, nullptr);
|
||||
else {
|
||||
// In a thread restart, a thread may resume execution at an
|
||||
// arbitrary function entry point, with its stack and TLS state
|
||||
// reset. We unpoison the stack in that case.
|
||||
PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
// Each thread runs this just before it exits,
|
||||
// with the pointer returned by BeforeThreadCreateHook (above).
|
||||
// All per-thread destructors have already been called.
|
||||
static void ThreadExitHook(void *hook, uptr os_id) {
|
||||
AsanThread *thread = static_cast<AsanThread *>(hook);
|
||||
if (thread)
|
||||
AsanThread::TSDDtor(thread->context());
|
||||
}
|
||||
|
||||
static void HandleExit() {
|
||||
// Disable ASan by setting it to uninitialized. Also reset the
|
||||
// shadow memory to avoid reporting errors after the run-time has
|
||||
// been destroyed.
|
||||
if (asan_inited) {
|
||||
asan_inited = false;
|
||||
ResetShadowMemory();
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
// These are declared (in extern "C") by <some_path/sanitizer.h>.
|
||||
// The system runtime will call our definitions directly.
|
||||
|
||||
extern "C" {
|
||||
void __sanitizer_early_init() {
|
||||
__asan::EarlyInit();
|
||||
}
|
||||
|
||||
void *__sanitizer_before_thread_create_hook(uptr thread, bool detached,
|
||||
const char *name,
|
||||
void *stack_base, size_t stack_size,
|
||||
void *tls_base, size_t tls_size) {
|
||||
return __asan::BeforeThreadCreateHook(
|
||||
thread, detached,
|
||||
reinterpret_cast<uptr>(stack_base), stack_size,
|
||||
reinterpret_cast<uptr>(tls_base), tls_size);
|
||||
}
|
||||
|
||||
void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) {
|
||||
__asan::ThreadCreateHook(handle, status != 0);
|
||||
}
|
||||
|
||||
void __sanitizer_thread_start_hook(void *handle, uptr self) {
|
||||
__asan::ThreadStartHook(handle, self);
|
||||
}
|
||||
|
||||
void __sanitizer_thread_exit_hook(void *handle, uptr self) {
|
||||
__asan::ThreadExitHook(handle, self);
|
||||
}
|
||||
|
||||
void __sanitizer_exit() {
|
||||
__asan::HandleExit();
|
||||
}
|
||||
} // "C"
|
||||
|
||||
#endif // SANITIZER_RTEMS
|
@ -54,7 +54,8 @@ static void AsanDie() {
|
||||
UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
|
||||
UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd);
|
||||
} else {
|
||||
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
|
||||
if (kHighShadowEnd)
|
||||
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -63,8 +64,14 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
|
||||
u64 v1, u64 v2) {
|
||||
Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
|
||||
line, cond, (uptr)v1, (uptr)v2);
|
||||
// FIXME: check for infinite recursion without a thread-local counter here.
|
||||
PRINT_CURRENT_STACK_CHECK();
|
||||
|
||||
// Print a stack trace the first time we come here. Otherwise, we probably
|
||||
// failed a CHECK during symbolization.
|
||||
static atomic_uint32_t num_calls;
|
||||
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
|
||||
PRINT_CURRENT_STACK_CHECK();
|
||||
}
|
||||
|
||||
Die();
|
||||
}
|
||||
|
||||
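The num_calls counter added to AsanCheckFailed above, and the __sanitizer_acquire_crash_state() call used by ScopedInErrorReport earlier in this merge, implement the same first-reporter-wins idea. A minimal standalone version of that idea using std::atomic (illustrative, not the runtime's code):

#include <atomic>
#include <cstdio>

static std::atomic<bool> crash_state{false};

// Returns 1 only for the first caller; later callers get 0 and back off.
static int AcquireCrashState() {
  return !crash_state.exchange(true, std::memory_order_relaxed);
}

int main() {
  printf("%d\n", AcquireCrashState());  // 1: this caller owns the report
  printf("%d\n", AcquireCrashState());  // 0: a report is already in progress
  return 0;
}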
@ -138,6 +145,8 @@ ASAN_REPORT_ERROR_N(load, false)
|
||||
ASAN_REPORT_ERROR_N(store, true)
|
||||
|
||||
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
|
||||
if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
|
||||
return; \
|
||||
uptr sp = MEM_TO_SHADOW(addr); \
|
||||
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
|
||||
: *reinterpret_cast<u16 *>(sp); \
|
||||
@ -304,20 +313,24 @@ static void asan_atexit() {
|
||||
}
|
||||
|
||||
static void InitializeHighMemEnd() {
|
||||
#if !SANITIZER_MYRIAD2
|
||||
#if !ASAN_FIXED_MAPPING
|
||||
kHighMemEnd = GetMaxVirtualAddress();
|
||||
kHighMemEnd = GetMaxUserVirtualAddress();
|
||||
// Increase kHighMemEnd to make sure it's properly
|
||||
// aligned together with kHighMemBeg:
|
||||
kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
|
||||
#endif // !ASAN_FIXED_MAPPING
|
||||
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
|
||||
#endif // !SANITIZER_MYRIAD2
|
||||
}
|
||||
|
||||
void PrintAddressSpaceLayout() {
|
||||
Printf("|| `[%p, %p]` || HighMem ||\n",
|
||||
(void*)kHighMemBeg, (void*)kHighMemEnd);
|
||||
Printf("|| `[%p, %p]` || HighShadow ||\n",
|
||||
(void*)kHighShadowBeg, (void*)kHighShadowEnd);
|
||||
if (kHighMemBeg) {
|
||||
Printf("|| `[%p, %p]` || HighMem ||\n",
|
||||
(void*)kHighMemBeg, (void*)kHighMemEnd);
|
||||
Printf("|| `[%p, %p]` || HighShadow ||\n",
|
||||
(void*)kHighShadowBeg, (void*)kHighShadowEnd);
|
||||
}
|
||||
if (kMidMemBeg) {
|
||||
Printf("|| `[%p, %p]` || ShadowGap3 ||\n",
|
||||
(void*)kShadowGap3Beg, (void*)kShadowGap3End);
|
||||
@ -336,11 +349,14 @@ void PrintAddressSpaceLayout() {
|
||||
Printf("|| `[%p, %p]` || LowMem ||\n",
|
||||
(void*)kLowMemBeg, (void*)kLowMemEnd);
|
||||
}
|
||||
Printf("MemToShadow(shadow): %p %p %p %p",
|
||||
Printf("MemToShadow(shadow): %p %p",
|
||||
(void*)MEM_TO_SHADOW(kLowShadowBeg),
|
||||
(void*)MEM_TO_SHADOW(kLowShadowEnd),
|
||||
(void*)MEM_TO_SHADOW(kHighShadowBeg),
|
||||
(void*)MEM_TO_SHADOW(kHighShadowEnd));
|
||||
(void*)MEM_TO_SHADOW(kLowShadowEnd));
|
||||
if (kHighMemBeg) {
|
||||
Printf(" %p %p",
|
||||
(void*)MEM_TO_SHADOW(kHighShadowBeg),
|
||||
(void*)MEM_TO_SHADOW(kHighShadowEnd));
|
||||
}
|
||||
if (kMidMemBeg) {
|
||||
Printf(" %p %p",
|
||||
(void*)MEM_TO_SHADOW(kMidShadowBeg),
|
||||
@ -372,6 +388,7 @@ static void AsanInitInternal() {
|
||||
asan_init_is_running = true;
|
||||
|
||||
CacheBinaryName();
|
||||
CheckASLR();
|
||||
|
||||
// Initialize flags. This must be done early, because most of the
|
||||
// initialization steps look at flags().
|
||||
@ -405,6 +422,7 @@ static void AsanInitInternal() {
|
||||
MaybeReexec();
|
||||
|
||||
// Setup internal allocator callback.
|
||||
SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
|
||||
SetLowLevelAllocateCallback(OnLowLevelAllocate);
|
||||
|
||||
InitializeAsanInterceptors();
|
||||
@ -523,6 +541,9 @@ void NOINLINE __asan_handle_no_return() {
|
||||
if (curr_thread) {
|
||||
top = curr_thread->stack_top();
|
||||
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
|
||||
} else if (SANITIZER_RTEMS) {
|
||||
// Give up on RTEMS.
|
||||
return;
|
||||
} else {
|
||||
CHECK(!SANITIZER_FUCHSIA);
|
||||
// If we haven't seen this thread, try asking the OS for stack bounds.
|
||||
|
@ -12,8 +12,9 @@
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
|
||||
// asan_fuchsia.cc has its own InitializeShadowMemory implementation.
|
||||
#if !SANITIZER_FUCHSIA
|
||||
// asan_fuchsia.cc and asan_rtems.cc have their own
|
||||
// InitializeShadowMemory implementation.
|
||||
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
|
||||
#include "asan_internal.h"
|
||||
#include "asan_mapping.h"
|
||||
@ -28,8 +29,7 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
|
||||
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
|
||||
uptr size = end - beg + 1;
|
||||
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
|
||||
void *res = MmapFixedNoReserve(beg, size, name);
|
||||
if (res != (void *)beg) {
|
||||
if (!MmapFixedNoReserve(beg, size, name)) {
|
||||
Report(
|
||||
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
|
||||
"Perhaps you're using ulimit -v\n",
|
||||
@ -97,17 +97,21 @@ void InitializeShadowMemory() {
|
||||
// when necessary. When dynamic address is used, the macro |kLowShadowBeg|
|
||||
// expands to |__asan_shadow_memory_dynamic_address| which is
|
||||
// |kDefaultShadowSentinel|.
|
||||
bool full_shadow_is_available = false;
|
||||
if (shadow_start == kDefaultShadowSentinel) {
|
||||
__asan_shadow_memory_dynamic_address = 0;
|
||||
CHECK_EQ(0, kLowShadowBeg);
|
||||
shadow_start = FindDynamicShadowStart();
|
||||
if (SANITIZER_LINUX) full_shadow_is_available = true;
|
||||
}
|
||||
// Update the shadow memory address (potentially) used by instrumentation.
|
||||
__asan_shadow_memory_dynamic_address = shadow_start;
|
||||
|
||||
if (kLowShadowBeg) shadow_start -= GetMmapGranularity();
|
||||
bool full_shadow_is_available =
|
||||
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
|
||||
|
||||
if (!full_shadow_is_available)
|
||||
full_shadow_is_available =
|
||||
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
|
||||
|
||||
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
|
||||
!ASAN_FIXED_MAPPING
|
||||
@ -156,4 +160,4 @@ void InitializeShadowMemory() {
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
#endif // !SANITIZER_FUCHSIA
|
||||
#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
|
@ -29,9 +29,8 @@ u32 GetMallocContextSize();
|
||||
// The pc will be in the position 0 of the resulting stack trace.
|
||||
// The bp may refer to the current frame or to the caller's frame.
|
||||
ALWAYS_INLINE
|
||||
void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
|
||||
uptr pc, uptr bp, void *context,
|
||||
bool fast) {
|
||||
void GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc, uptr bp,
|
||||
void *context, bool fast) {
|
||||
#if SANITIZER_WINDOWS
|
||||
stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
|
||||
#else
|
||||
@ -60,32 +59,29 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
|
||||
// as early as possible (in functions exposed to the user), as we generally
|
||||
// don't want stack trace to contain functions from ASan internals.
|
||||
|
||||
#define GET_STACK_TRACE(max_size, fast) \
|
||||
BufferedStackTrace stack; \
|
||||
if (max_size <= 2) { \
|
||||
stack.size = max_size; \
|
||||
if (max_size > 0) { \
|
||||
stack.top_frame_bp = GET_CURRENT_FRAME(); \
|
||||
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
|
||||
if (max_size > 1) \
|
||||
stack.trace_buffer[1] = GET_CALLER_PC(); \
|
||||
} \
|
||||
} else { \
|
||||
GetStackTraceWithPcBpAndContext(&stack, max_size, \
|
||||
StackTrace::GetCurrentPc(), \
|
||||
GET_CURRENT_FRAME(), 0, fast); \
|
||||
#define GET_STACK_TRACE(max_size, fast) \
|
||||
BufferedStackTrace stack; \
|
||||
if (max_size <= 2) { \
|
||||
stack.size = max_size; \
|
||||
if (max_size > 0) { \
|
||||
stack.top_frame_bp = GET_CURRENT_FRAME(); \
|
||||
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
|
||||
if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
|
||||
} \
|
||||
} else { \
|
||||
GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
|
||||
GET_CURRENT_FRAME(), 0, fast); \
|
||||
}
|
||||
|
||||
#define GET_STACK_TRACE_FATAL(pc, bp) \
|
||||
BufferedStackTrace stack; \
|
||||
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
|
||||
common_flags()->fast_unwind_on_fatal)
|
||||
#define GET_STACK_TRACE_FATAL(pc, bp) \
|
||||
BufferedStackTrace stack; \
|
||||
GetStackTrace(&stack, kStackTraceMax, pc, bp, 0, \
|
||||
common_flags()->fast_unwind_on_fatal)
|
||||
|
||||
#define GET_STACK_TRACE_SIGNAL(sig) \
|
||||
BufferedStackTrace stack; \
|
||||
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \
|
||||
(sig).pc, (sig).bp, (sig).context, \
|
||||
common_flags()->fast_unwind_on_fatal)
|
||||
#define GET_STACK_TRACE_SIGNAL(sig) \
|
||||
BufferedStackTrace stack; \
|
||||
GetStackTrace(&stack, kStackTraceMax, (sig).pc, (sig).bp, (sig).context, \
|
||||
common_flags()->fast_unwind_on_fatal)
|
||||
|
||||
#define GET_STACK_TRACE_FATAL_HERE \
|
||||
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
|
||||
|
@ -219,22 +219,25 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
|
||||
void AsanThread::Init(const InitOptions *options) {
|
||||
next_stack_top_ = next_stack_bottom_ = 0;
|
||||
atomic_store(&stack_switching_, false, memory_order_release);
|
||||
fake_stack_ = nullptr; // Will be initialized lazily if needed.
|
||||
CHECK_EQ(this->stack_size(), 0U);
|
||||
SetThreadStackAndTls(options);
|
||||
CHECK_GT(this->stack_size(), 0U);
|
||||
CHECK(AddrIsInMem(stack_bottom_));
|
||||
CHECK(AddrIsInMem(stack_top_ - 1));
|
||||
ClearShadowForThreadStackAndTLS();
|
||||
fake_stack_ = nullptr;
|
||||
if (__asan_option_detect_stack_use_after_return)
|
||||
AsyncSignalSafeLazyInitFakeStack();
|
||||
int local = 0;
|
||||
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
|
||||
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
|
||||
&local);
|
||||
}
|
||||
|
||||
// Fuchsia doesn't use ThreadStart.
|
||||
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
|
||||
#if !SANITIZER_FUCHSIA
|
||||
// Fuchsia and RTEMS don't use ThreadStart.
|
||||
// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
|
||||
// SetThreadStackAndTls.
|
||||
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
|
||||
thread_return_t AsanThread::ThreadStart(
|
||||
tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
|
||||
@ -294,12 +297,17 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
|
||||
CHECK(AddrIsInStack((uptr)&local));
|
||||
}
|
||||
|
||||
#endif // !SANITIZER_FUCHSIA
|
||||
#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
|
||||
void AsanThread::ClearShadowForThreadStackAndTLS() {
|
||||
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
|
||||
if (tls_begin_ != tls_end_)
|
||||
PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
|
||||
if (tls_begin_ != tls_end_) {
|
||||
uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
|
||||
uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
|
||||
FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
|
||||
tls_end_ - tls_begin_aligned,
|
||||
tls_end_aligned - tls_end_, 0);
|
||||
}
|
||||
}
|
||||
|
||||
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
|
||||
@ -384,6 +392,9 @@ static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
|
||||
}
|
||||
|
||||
AsanThread *GetCurrentThread() {
|
||||
if (SANITIZER_RTEMS && !asan_inited)
|
||||
return nullptr;
|
||||
|
||||
AsanThreadContext *context =
|
||||
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
|
||||
if (!context) {
|
||||
@ -475,6 +486,11 @@ void UnlockThreadRegistry() {
|
||||
__asan::asanThreadRegistry().Unlock();
|
||||
}
|
||||
|
||||
ThreadRegistry *GetThreadRegistryLocked() {
|
||||
__asan::asanThreadRegistry().CheckLocked();
|
||||
return &__asan::asanThreadRegistry();
|
||||
}
|
||||
|
||||
void EnsureMainThreadIDIsCorrect() {
|
||||
__asan::EnsureMainThreadIDIsCorrect();
|
||||
}
|
||||
|
@ -157,6 +157,14 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
|
||||
namespace __asan {
|
||||
|
||||
void InitializePlatformInterceptors() {
|
||||
// The interceptors were not designed to be removable, so we have to keep this
|
||||
// module alive for the life of the process.
|
||||
HMODULE pinned;
|
||||
CHECK(GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
|
||||
GET_MODULE_HANDLE_EX_FLAG_PIN,
|
||||
(LPCWSTR)&InitializePlatformInterceptors,
|
||||
&pinned));
|
||||
|
||||
ASAN_INTERCEPT_FUNC(CreateThread);
|
||||
ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
|
||||
|
||||
@ -220,8 +228,8 @@ uptr FindDynamicShadowStart() {
|
||||
uptr alignment = 8 * granularity;
|
||||
uptr left_padding = granularity;
|
||||
uptr space_size = kHighShadowEnd + left_padding;
|
||||
uptr shadow_start =
|
||||
FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
|
||||
uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
|
||||
granularity, nullptr, nullptr);
|
||||
CHECK_NE((uptr)0, shadow_start);
|
||||
CHECK(IsAligned(shadow_start, alignment));
|
||||
return shadow_start;
|
||||
@ -263,11 +271,6 @@ ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
|
||||
// Determine the address of the page that is being accessed.
|
||||
uptr page = RoundDownTo(addr, page_size);
|
||||
|
||||
// Query the existing page.
|
||||
MEMORY_BASIC_INFORMATION mem_info = {};
|
||||
if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0)
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
|
||||
// Commit the page.
|
||||
uptr result =
|
||||
(uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
|
||||
|
@ -97,7 +97,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
|
||||
}
|
||||
#endif
|
||||
|
||||
// Window specific functions not included in asan_interface.inc.
|
||||
// Windows specific functions not included in asan_interface.inc.
|
||||
INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
|
||||
INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
|
||||
INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
|
||||
|
@ -30,7 +30,7 @@ extern "C" {
|
||||
size_t __sanitizer_get_allocated_size(const volatile void *p);
|
||||
|
||||
/* Number of bytes, allocated and not yet freed by the application. */
|
||||
size_t __sanitizer_get_current_allocated_bytes();
|
||||
size_t __sanitizer_get_current_allocated_bytes(void);
|
||||
|
||||
/* Number of bytes, mmaped by the allocator to fulfill allocation requests.
|
||||
Generally, for request of X bytes, allocator can reserve and add to free
|
||||
@ -38,17 +38,17 @@ extern "C" {
|
||||
All these chunks count toward the heap size. Currently, allocator never
|
||||
releases memory to OS (instead, it just puts freed chunks to free
|
||||
lists). */
|
||||
size_t __sanitizer_get_heap_size();
|
||||
size_t __sanitizer_get_heap_size(void);
|
||||
|
||||
/* Number of bytes, mmaped by the allocator, which can be used to fulfill
|
||||
allocation requests. When a user program frees memory chunk, it can first
|
||||
fall into quarantine and will count toward __sanitizer_get_free_bytes()
|
||||
later. */
|
||||
size_t __sanitizer_get_free_bytes();
|
||||
size_t __sanitizer_get_free_bytes(void);
|
||||
|
||||
/* Number of bytes in unmapped pages, that are released to OS. Currently,
|
||||
always returns 0. */
|
||||
size_t __sanitizer_get_unmapped_bytes();
|
||||
size_t __sanitizer_get_unmapped_bytes(void);
|
||||
|
||||
/* Malloc hooks that may be optionally provided by user.
|
||||
__sanitizer_malloc_hook(ptr, size) is called immediately after
|
||||
@ -74,6 +74,12 @@ extern "C" {
|
||||
void (*malloc_hook)(const volatile void *, size_t),
|
||||
void (*free_hook)(const volatile void *));
|
||||
|
||||
/* Drains allocator quarantines (calling thread's and global ones), returns
|
||||
freed memory back to OS and releases other non-essential internal allocator
|
||||
resources in attempt to reduce process RSS.
|
||||
Currently available with ASan only.
|
||||
*/
|
||||
void __sanitizer_purge_allocator(void);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
@ -62,19 +62,19 @@ extern "C" {
|
||||
|
||||
// Useful for calling from a debugger to get information about an ASan error.
|
||||
// Returns 1 if an error has been (or is being) reported, otherwise returns 0.
|
||||
int __asan_report_present();
|
||||
int __asan_report_present(void);
|
||||
|
||||
// Useful for calling from a debugger to get information about an ASan error.
|
||||
// If an error has been (or is being) reported, the following functions return
|
||||
// the pc, bp, sp, address, access type (0 = read, 1 = write), access size and
|
||||
// bug description (e.g. "heap-use-after-free"). Otherwise they return 0.
|
||||
void *__asan_get_report_pc();
|
||||
void *__asan_get_report_bp();
|
||||
void *__asan_get_report_sp();
|
||||
void *__asan_get_report_address();
|
||||
int __asan_get_report_access_type();
|
||||
size_t __asan_get_report_access_size();
|
||||
const char *__asan_get_report_description();
|
||||
void *__asan_get_report_pc(void);
|
||||
void *__asan_get_report_bp(void);
|
||||
void *__asan_get_report_sp(void);
|
||||
void *__asan_get_report_address(void);
|
||||
int __asan_get_report_access_type(void);
|
||||
size_t __asan_get_report_access_size(void);
|
||||
const char *__asan_get_report_description(void);
|
||||
|
||||
// Useful for calling from the debugger to get information about a pointer.
|
||||
// Returns the category of the given pointer as a constant string.
|
||||
@ -116,21 +116,21 @@ extern "C" {
|
||||
// User may provide function that would be called right when ASan detects
|
||||
// an error. This can be used to notice cases when ASan detects an error, but
|
||||
// the program crashes before ASan report is printed.
|
||||
void __asan_on_error();
|
||||
void __asan_on_error(void);
|
||||
|
||||
// Prints accumulated stats to stderr. Used for debugging.
|
||||
void __asan_print_accumulated_stats();
|
||||
void __asan_print_accumulated_stats(void);
|
||||
|
||||
// This function may be optionally provided by user and should return
|
||||
// a string containing ASan runtime options. See asan_flags.h for details.
|
||||
const char* __asan_default_options();
|
||||
const char* __asan_default_options(void);
|
||||
|
||||
// The following 2 functions facilitate garbage collection in presence of
|
||||
// asan's fake stack.
|
||||
|
||||
// Returns an opaque handler to be used later in __asan_addr_is_in_fake_stack.
|
||||
// Returns NULL if the current thread does not have a fake stack.
|
||||
void *__asan_get_current_fake_stack();
|
||||
void *__asan_get_current_fake_stack(void);
|
||||
|
||||
// If fake_stack is non-NULL and addr belongs to a fake frame in
|
||||
// fake_stack, returns the address on real stack that corresponds to
|
||||
|
@ -63,6 +63,11 @@ extern "C" {
|
||||
void __sanitizer_unaligned_store32(void *p, uint32_t x);
|
||||
void __sanitizer_unaligned_store64(void *p, uint64_t x);
|
||||
|
||||
// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
|
||||
// to ensure only one report is printed when multiple errors occur
|
||||
// simultaneously.
|
||||
int __sanitizer_acquire_crash_state();
|
||||
|
||||
// Annotate the current state of a contiguous container, such as
|
||||
// std::vector, std::string or similar.
|
||||
// A contiguous container is a container that keeps all of its elements
|
||||
@ -113,10 +118,16 @@ extern "C" {
|
||||
const void *beg, const void *mid, const void *end);
|
||||
|
||||
// Print the stack trace leading to this call. Useful for debugging user code.
|
||||
void __sanitizer_print_stack_trace();
|
||||
void __sanitizer_print_stack_trace(void);
|
||||
|
||||
// Symbolizes the supplied 'pc' using the format string 'fmt'.
|
||||
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
|
||||
// If 'out_buf' is not empty then output is zero or more non empty C strings
|
||||
// followed by single empty C string. Multiple strings can be returned if PC
|
||||
// corresponds to inlined function. Inlined frames are printed in the order
|
||||
// from "most-inlined" to the "least-inlined", so the last frame should be the
|
||||
// not inlined function.
|
||||
// Inlined frames can be removed with 'symbolize_inline_frames=0'.
|
||||
// The format syntax is described in
|
||||
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
|
||||
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
|
||||
|
@ -18,10 +18,10 @@ extern "C" {
|
||||
#endif
|
||||
|
||||
// Record and dump coverage info.
|
||||
void __sanitizer_cov_dump();
|
||||
void __sanitizer_cov_dump(void);
|
||||
|
||||
// Clear collected coverage info.
|
||||
void __sanitizer_cov_reset();
|
||||
void __sanitizer_cov_reset(void);
|
||||
|
||||
// Dump collected coverage info. Sorts pcs by module into individual .sancov
|
||||
// files.
|
||||
|
@ -35,11 +35,11 @@ extern "C" {
|
||||
// This function can be called mid-run (or at the end of a run for
|
||||
// a server process that doesn't shut down normally) to request that
|
||||
// data for that point in the run be reported from the tool.
|
||||
void COMPILER_RT_WEAK __esan_report();
|
||||
void COMPILER_RT_WEAK __esan_report(void);
|
||||
|
||||
// This function returns the number of samples that the esan tool has collected
|
||||
// to this point. This is useful for testing.
|
||||
unsigned int COMPILER_RT_WEAK __esan_get_sample_count();
|
||||
unsigned int COMPILER_RT_WEAK __esan_get_sample_count(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
|
libsanitizer/include/sanitizer/hwasan_interface.h (new file, 82 lines)
@ -0,0 +1,82 @@
//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_HWASAN_INTERFACE_H
#define SANITIZER_HWASAN_INTERFACE_H

#include <sanitizer/common_interface_defs.h>

#ifdef __cplusplus
extern "C" {
#endif
  // Initialize shadow but not the rest of the runtime.
  // Does not call libc unless there is an error.
  // Can be called multiple times, or not at all (in which case shadow will
  // be initialized in compiler-inserted __hwasan_init() call).
  void __hwasan_shadow_init(void);

  // This function may be optionally provided by user and should return
  // a string containing HWASan runtime options. See asan_flags.h for details.
  const char* __hwasan_default_options(void);

  void __hwasan_enable_allocator_tagging(void);
  void __hwasan_disable_allocator_tagging(void);

  // Mark region of memory with the given tag. Both address and size need to be
  // 16-byte aligned.
  void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
                           size_t size);

  /// Set pointer tag. Previous tag is lost.
  void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);

  // Set memory tag from the current SP address to the given address to zero.
  // This is meant to annotate longjmp and other non-local jumps.
  // This function needs to know the (almost) exact destination frame address;
  // clearing shadow for the entire thread stack like __asan_handle_no_return
  // does would cause false reports.
  void __hwasan_handle_longjmp(const void *sp_dst);

  // Libc hook for thread creation. Should be called in the child thread before
  // any instrumented code.
  void __hwasan_thread_enter();

  // Libc hook for thread destruction. No instrumented code should run after
  // this call.
  void __hwasan_thread_exit();

  // Print shadow and origin for the memory range to stderr in a human-readable
  // format.
  void __hwasan_print_shadow(const volatile void *x, size_t size);

  // Print one-line report about the memory usage of the current process.
  void __hwasan_print_memory_usage();

  int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
  void * __sanitizer_memalign(size_t alignment, size_t size);
  void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
  void * __sanitizer___libc_memalign(size_t alignment, size_t size);
  void * __sanitizer_valloc(size_t size);
  void * __sanitizer_pvalloc(size_t size);
  void __sanitizer_free(void *ptr);
  void __sanitizer_cfree(void *ptr);
  size_t __sanitizer_malloc_usable_size(const void *ptr);
  struct mallinfo __sanitizer_mallinfo();
  int __sanitizer_mallopt(int cmd, int value);
  void __sanitizer_malloc_stats(void);
  void * __sanitizer_calloc(size_t nmemb, size_t size);
  void * __sanitizer_realloc(void *ptr, size_t size);
  void * __sanitizer_malloc(size_t size);
#ifdef __cplusplus
}  // extern "C"
#endif

#endif // SANITIZER_HWASAN_INTERFACE_H
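For readers new to HWASan, __hwasan_tag_pointer() above relies on AArch64's top-byte-ignore feature: the tag lives in the top byte of a 64-bit pointer. A standalone sketch of that encoding (plain bit manipulation on a made-up address, assuming 64-bit pointers; not the runtime's implementation):

#include <cstdint>
#include <cstdio>

static uint64_t TagPointer(uint64_t p, unsigned char tag) {
  return (p & ~(0xffULL << 56)) | ((uint64_t)tag << 56);
}

static unsigned char GetTag(uint64_t p) { return (unsigned char)(p >> 56); }

int main() {
  uint64_t p = 0x0000004012345678ULL;      // hypothetical heap address
  uint64_t tagged = TagPointer(p, 0x2a);
  printf("tag = 0x%x, untagged = 0x%llx\n", GetTag(tagged),
         (unsigned long long)(tagged & ~(0xffULL << 56)));
  return 0;
}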
@ -19,8 +19,8 @@ extern "C" {
|
||||
#endif
|
||||
// Allocations made between calls to __lsan_disable() and __lsan_enable() will
|
||||
// be treated as non-leaks. Disable/enable pairs may be nested.
|
||||
void __lsan_disable();
|
||||
void __lsan_enable();
|
||||
void __lsan_disable(void);
|
||||
void __lsan_enable(void);
|
||||
|
||||
// The heap object into which p points will be treated as a non-leak.
|
||||
void __lsan_ignore_object(const void *p);
|
||||
@ -47,7 +47,7 @@ extern "C" {
|
||||
// the time of first invocation of this function.
|
||||
// By calling this function early during process shutdown, you can instruct
|
||||
// LSan to ignore shutdown-only leaks which happen later on.
|
||||
void __lsan_do_leak_check();
|
||||
void __lsan_do_leak_check(void);
|
||||
|
||||
// Check for leaks now. Returns zero if no leaks have been found or if leak
|
||||
// detection is disabled, non-zero otherwise.
|
||||
@ -56,7 +56,7 @@ extern "C" {
|
||||
// terminate the process. It does not affect the behavior of
|
||||
// __lsan_do_leak_check() or the end-of-process leak check, and is not
|
||||
// affected by them.
|
||||
int __lsan_do_recoverable_leak_check();
|
||||
int __lsan_do_recoverable_leak_check(void);
|
||||
|
||||
// The user may optionally provide this function to disallow leak checking
|
||||
// for the program it is linked into (if the return value is non-zero). This
|
||||
@ -64,15 +64,15 @@ extern "C" {
|
||||
// that is unsupported.
|
||||
// To avoid dead stripping, you may need to define this function with
|
||||
// __attribute__((used))
|
||||
int __lsan_is_turned_off();
|
||||
int __lsan_is_turned_off(void);
|
||||
|
||||
// This function may be optionally provided by user and should return
|
||||
// a string containing LSan runtime options. See lsan_flags.inc for details.
|
||||
const char *__lsan_default_options();
|
||||
const char *__lsan_default_options(void);
|
||||
|
||||
// This function may be optionally provided by the user and should return
|
||||
// a string containing LSan suppressions.
|
||||
const char *__lsan_default_suppressions();
|
||||
const char *__lsan_default_suppressions(void);
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
|
||||
|
@ -29,10 +29,10 @@ extern "C" {
|
||||
int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
|
||||
|
||||
/* Returns non-zero if tracking origins. */
|
||||
int __msan_get_track_origins();
|
||||
int __msan_get_track_origins(void);
|
||||
|
||||
/* Returns the origin id of the latest UMR in the calling thread. */
|
||||
uint32_t __msan_get_umr_origin();
|
||||
uint32_t __msan_get_umr_origin(void);
|
||||
|
||||
/* Make memory region fully initialized (without changing its contents). */
|
||||
void __msan_unpoison(const volatile void *a, size_t size);
|
||||
@ -80,7 +80,7 @@ extern "C" {
|
||||
void __msan_dump_shadow(const volatile void *x, size_t size);
|
||||
|
||||
/* Returns true if running under a dynamic tool (DynamoRio-based). */
|
||||
int __msan_has_dynamic_component();
|
||||
int __msan_has_dynamic_component(void);
|
||||
|
||||
/* Tell MSan about newly allocated memory (ex.: custom allocator).
|
||||
Memory will be marked uninitialized, with origin at the call site. */
|
||||
@ -91,7 +91,7 @@ extern "C" {
|
||||
|
||||
/* This function may be optionally provided by user and should return
|
||||
a string containing Msan runtime options. See msan_flags.h for details. */
|
||||
const char* __msan_default_options();
|
||||
const char* __msan_default_options(void);
|
||||
|
||||
/* Deprecated. Call __sanitizer_set_death_callback instead. */
|
||||
void __msan_set_death_callback(void (*callback)(void));
|
||||
@ -102,6 +102,14 @@ extern "C" {
|
||||
copy. Source and destination regions can overlap. */
|
||||
void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
|
||||
size_t size);
|
||||
|
||||
/* Disables uninitialized memory checks in interceptors. */
|
||||
void __msan_scoped_disable_interceptor_checks(void);
|
||||
|
||||
/* Re-enables uninitialized memory checks in interceptors after a previous
|
||||
call to __msan_scoped_disable_interceptor_checks. */
|
||||
void __msan_scoped_enable_interceptor_checks(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
|
libsanitizer/include/sanitizer/netbsd_syscall_hooks.h (new file, 4732 lines; diff omitted because it is too large)
libsanitizer/include/sanitizer/scudo_interface.h (new file, 37 lines)
@ -0,0 +1,37 @@
//===-- sanitizer/scudo_interface.h -----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// Public Scudo interface header.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SCUDO_INTERFACE_H_
#define SANITIZER_SCUDO_INTERFACE_H_

#include <sanitizer/common_interface_defs.h>

#ifdef __cplusplus
extern "C" {
#endif
  // This function may be optionally provided by a user and should return
  // a string containing Scudo runtime options. See scudo_flags.h for details.
  const char* __scudo_default_options(void);

  // This function allows setting the RSS limit at runtime. This can be either
  // the hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit
  // can be removed by setting LimitMb to 0. This function's parameters should
  // be fully trusted to avoid security mishaps.
  void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);

  // This function outputs various allocator statistics for both the Primary
  // and Secondary allocators, including memory usage, number of allocations
  // and deallocations.
  void __scudo_print_stats(void);
#ifdef __cplusplus
}  // extern "C"
#endif

#endif // SANITIZER_SCUDO_INTERFACE_H_
@ -42,6 +42,11 @@ const unsigned __tsan_mutex_linker_init = 1 << 0;
|
||||
const unsigned __tsan_mutex_write_reentrant = 1 << 1;
|
||||
// Mutex is read reentrant.
|
||||
const unsigned __tsan_mutex_read_reentrant = 1 << 2;
|
||||
// Mutex does not have static storage duration, and must not be used after
|
||||
// its destructor runs. The opposite of __tsan_mutex_linker_init.
|
||||
// If this flag is passed to __tsan_mutex_destroy, then the destruction
|
||||
// is ignored unless this flag was previously set on the mutex.
|
||||
const unsigned __tsan_mutex_not_static = 1 << 8;
|
||||
|
||||
// Mutex operation flags:
|
||||
|
||||
@ -68,6 +73,7 @@ void __tsan_mutex_create(void *addr, unsigned flags);
|
||||
// Annotate destruction of a mutex.
|
||||
// Supported flags:
|
||||
// - __tsan_mutex_linker_init
|
||||
// - __tsan_mutex_not_static
|
||||
void __tsan_mutex_destroy(void *addr, unsigned flags);
|
||||
|
||||
// Annotate start of lock operation.
|
||||
|
@ -13,19 +13,21 @@
|
||||
#ifndef INTERCEPTION_H
|
||||
#define INTERCEPTION_H
|
||||
|
||||
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__) && \
|
||||
!defined(__NetBSD__) && !defined(_WIN32) && !defined(__Fuchsia__)
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
|
||||
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
|
||||
!SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_WINDOWS && \
|
||||
!SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
|
||||
# error "Interception doesn't work on this operating system."
|
||||
#endif
|
||||
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
|
||||
// These typedefs should be used only in the interceptor definitions to replace
|
||||
// the standard system types (e.g. SSIZE_T instead of ssize_t)
|
||||
typedef __sanitizer::uptr SIZE_T;
|
||||
typedef __sanitizer::sptr SSIZE_T;
|
||||
typedef __sanitizer::sptr PTRDIFF_T;
|
||||
typedef __sanitizer::s64 INTMAX_T;
|
||||
typedef __sanitizer::u64 UINTMAX_T;
|
||||
typedef __sanitizer::OFF_T OFF_T;
|
||||
typedef __sanitizer::OFF64_T OFF64_T;
|
||||
|
||||
@ -85,7 +87,7 @@ typedef __sanitizer::OFF64_T OFF64_T;
|
||||
// As it's decided at compile time which functions are to be intercepted on Mac,
|
||||
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
|
||||
|
||||
#if defined(__APPLE__)
|
||||
#if SANITIZER_MAC
|
||||
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
|
||||
|
||||
// Just a pair of pointers.
|
||||
@ -119,7 +121,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
# define INTERCEPTOR_ATTRIBUTE
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...)
|
||||
|
||||
#elif defined(_WIN32)
|
||||
#elif SANITIZER_WINDOWS
|
||||
# define WRAP(x) __asan_wrap_##x
|
||||
# define WRAPPER_NAME(x) "__asan_wrap_"#x
|
||||
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
|
||||
@ -127,7 +129,12 @@ const interpose_substitution substitution_##func_name[] \
|
||||
extern "C" ret_type func(__VA_ARGS__);
|
||||
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
|
||||
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
|
||||
#elif defined(__FreeBSD__) || defined(__NetBSD__)
|
||||
#elif SANITIZER_RTEMS
|
||||
# define WRAP(x) x
|
||||
# define WRAPPER_NAME(x) #x
|
||||
# define INTERCEPTOR_ATTRIBUTE
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...)
|
||||
#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
|
||||
# define WRAP(x) __interceptor_ ## x
|
||||
# define WRAPPER_NAME(x) "__interceptor_" #x
|
||||
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
|
||||
@ -137,7 +144,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
# define DECLARE_WRAPPER(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) \
|
||||
__attribute__((alias("__interceptor_" #func), visibility("default")));
|
||||
#elif !defined(__Fuchsia__)
|
||||
#elif !SANITIZER_FUCHSIA
|
||||
# define WRAP(x) __interceptor_ ## x
|
||||
# define WRAPPER_NAME(x) "__interceptor_" #x
|
||||
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
|
||||
@ -146,7 +153,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
__attribute__((weak, alias("__interceptor_" #func), visibility("default")));
|
||||
#endif
|
||||
|
||||
#if defined(__Fuchsia__)
|
||||
#if SANITIZER_FUCHSIA
|
||||
// There is no general interception at all on Fuchsia.
|
||||
// Sanitizer runtimes just define functions directly to preempt them,
|
||||
// and have bespoke ways to access the underlying libc functions.
|
||||
@ -154,10 +161,14 @@ const interpose_substitution substitution_##func_name[] \
|
||||
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
|
||||
# define REAL(x) __unsanitized_##x
|
||||
# define DECLARE_REAL(ret_type, func, ...)
|
||||
#elif !defined(__APPLE__)
|
||||
#elif SANITIZER_RTEMS
|
||||
# define REAL(x) __real_ ## x
|
||||
# define DECLARE_REAL(ret_type, func, ...) \
|
||||
extern "C" ret_type REAL(func)(__VA_ARGS__);
|
||||
#elif !SANITIZER_MAC
|
||||
# define PTR_TO_REAL(x) real_##x
|
||||
# define REAL(x) __interception::PTR_TO_REAL(x)
|
||||
# define FUNC_TYPE(x) x##_f
|
||||
# define FUNC_TYPE(x) x##_type
|
||||
|
||||
# define DECLARE_REAL(ret_type, func, ...) \
|
||||
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
|
||||
@ -165,14 +176,14 @@ const interpose_substitution substitution_##func_name[] \
|
||||
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
|
||||
}
|
||||
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
|
||||
#else // __APPLE__
|
||||
#else // SANITIZER_MAC
|
||||
# define REAL(x) x
|
||||
# define DECLARE_REAL(ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__);
|
||||
# define ASSIGN_REAL(x, y)
|
||||
#endif // __APPLE__
|
||||
#endif // SANITIZER_MAC
|
||||
|
||||
#if !defined(__Fuchsia__)
|
||||
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
|
||||
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
|
||||
extern "C" ret_type WRAP(func)(__VA_ARGS__);
|
||||
@ -184,7 +195,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
// macros does its job. In exceptional cases you may need to call REAL(foo)
|
||||
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
|
||||
// foo with an interceptor for other function.
|
||||
#if !defined(__APPLE__) && !defined(__Fuchsia__)
|
||||
#if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
|
||||
# define DEFINE_REAL(ret_type, func, ...) \
|
||||
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
|
||||
namespace __interception { \
|
||||
@ -194,7 +205,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
# define DEFINE_REAL(ret_type, func, ...)
|
||||
#endif
|
||||
|
||||
#if defined(__Fuchsia__)
|
||||
#if SANITIZER_FUCHSIA
|
||||
|
||||
// We need to define the __interceptor_func name just to get
|
||||
// sanitizer_common/scripts/gen_dynamic_list.py to export func.
|
||||
@ -204,7 +215,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
__interceptor_##func(__VA_ARGS__); \
|
||||
extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
|
||||
|
||||
#elif !defined(__APPLE__)
|
||||
#elif !SANITIZER_MAC
|
||||
|
||||
#define INTERCEPTOR(ret_type, func, ...) \
|
||||
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
|
||||
@ -217,7 +228,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
|
||||
INTERCEPTOR(ret_type, func, __VA_ARGS__)
|
||||
|
||||
#else // __APPLE__
|
||||
#else // SANITIZER_MAC
|
||||
|
||||
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
|
||||
extern "C" ret_type func(__VA_ARGS__) suffix; \
|
||||
@ -236,7 +247,7 @@ const interpose_substitution substitution_##func_name[] \
|
||||
INTERPOSER_2(overridee, WRAP(overrider))
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
#if SANITIZER_WINDOWS
|
||||
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
|
||||
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
|
||||
namespace __interception { \
|
||||
@ -262,17 +273,19 @@ typedef unsigned long uptr; // NOLINT
|
||||
|
||||
#define INCLUDED_FROM_INTERCEPTION_LIB
|
||||
|
||||
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
|
||||
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
|
||||
SANITIZER_OPENBSD || SANITIZER_SOLARIS
|
||||
|
||||
# include "interception_linux.h"
|
||||
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
|
||||
# define INTERCEPT_FUNCTION_VER(func, symver) \
|
||||
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
|
||||
#elif defined(__APPLE__)
|
||||
#elif SANITIZER_MAC
|
||||
# include "interception_mac.h"
|
||||
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
|
||||
# define INTERCEPT_FUNCTION_VER(func, symver) \
|
||||
INTERCEPT_FUNCTION_VER_MAC(func, symver)
|
||||
#elif defined(_WIN32)
|
||||
#elif SANITIZER_WINDOWS
|
||||
# include "interception_win.h"
|
||||
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
|
||||
# define INTERCEPT_FUNCTION_VER(func, symver) \
|
||||
|
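
Taken together, the macros above are used roughly as follows inside a sanitizer runtime; this is a hedged sketch, and the intercepted function and the init routine are illustrative rather than code from this merge.

// Sketch: defining and wiring up one interceptor.
#include "interception/interception.h"

INTERCEPTOR(SIZE_T, strlen, const char *s) {
  // Tool-specific checking of `s` would go here.
  return REAL(strlen)(s);
}

void InitializeInterceptors() {
  // Resolves the original strlen and stores it behind REAL(strlen).
  INTERCEPT_FUNCTION(strlen);
}
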
@ -10,32 +10,44 @@
|
||||
// Linux-specific interception methods.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
|
||||
#include "interception.h"
|
||||
|
||||
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
|
||||
SANITIZER_OPENBSD || SANITIZER_SOLARIS
|
||||
|
||||
#include <dlfcn.h> // for dlsym() and dlvsym()
|
||||
|
||||
#ifdef __NetBSD__
|
||||
#if SANITIZER_NETBSD
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
#endif
|
||||
|
||||
namespace __interception {
|
||||
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
|
||||
uptr real, uptr wrapper) {
|
||||
#ifdef __NetBSD__
|
||||
#if SANITIZER_NETBSD
|
||||
// XXX: Find a better way to handle renames
|
||||
if (internal_strcmp(func_name, "sigaction") == 0) func_name = "__sigaction14";
|
||||
#endif
|
||||
*func_addr = (uptr)dlsym(RTLD_NEXT, func_name);
|
||||
if (!*func_addr) {
|
||||
// If the lookup using RTLD_NEXT failed, the sanitizer runtime library is
|
||||
// later in the library search order than the DSO that we are trying to
|
||||
// intercept, which means that we cannot intercept this function. We still
|
||||
// want the address of the real definition, though, so look it up using
|
||||
// RTLD_DEFAULT.
|
||||
*func_addr = (uptr)dlsym(RTLD_DEFAULT, func_name);
|
||||
}
|
||||
return real == wrapper;
|
||||
}
|
||||
|
||||
#if !defined(__ANDROID__) // android does not have dlvsym
|
||||
// Android and Solaris do not have dlvsym
|
||||
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
|
||||
void *GetFuncAddrVer(const char *func_name, const char *ver) {
|
||||
return dlvsym(RTLD_NEXT, func_name, ver);
|
||||
}
|
||||
#endif // !defined(__ANDROID__)
|
||||
#endif // !SANITIZER_ANDROID
|
||||
|
||||
} // namespace __interception
|
||||
|
||||
#endif // __linux__ || __FreeBSD__ || __NetBSD__
|
||||
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
|
||||
// SANITIZER_OPENBSD || SANITIZER_SOLARIS
|
||||
|
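
The RTLD_NEXT lookup above is the standard preload-interposition pattern; a stand-alone, hedged sketch of the same idea outside the sanitizer runtime (the wrapped function is arbitrary; build as a shared object for LD_PRELOAD and link with -ldl where required).

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // RTLD_NEXT needs this with glibc headers
#endif
#include <dlfcn.h>
#include <stdio.h>

typedef int (*puts_fn)(const char *);

extern "C" int puts(const char *s) {
  static puts_fn real_puts = 0;
  if (!real_puts)  // next definition in search order, i.e. the real libc one
    real_puts = (puts_fn)dlsym(RTLD_NEXT, "puts");
  fprintf(stderr, "[wrap] puts(\"%s\")\n", s);
  return real_puts ? real_puts(s) : -1;
}
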
@ -10,7 +10,8 @@
|
||||
// Linux-specific interception methods.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
|
||||
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
|
||||
SANITIZER_OPENBSD || SANITIZER_SOLARIS
|
||||
|
||||
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
|
||||
# error "interception_linux.h should be included from interception library only"
|
||||
@ -32,14 +33,16 @@ void *GetFuncAddrVer(const char *func_name, const char *ver);
|
||||
(::__interception::uptr) & (func), \
|
||||
(::__interception::uptr) & WRAP(func))
|
||||
|
||||
#if !defined(__ANDROID__) // android does not have dlvsym
|
||||
// Android, Solaris and OpenBSD do not have dlvsym
|
||||
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
|
||||
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
|
||||
(::__interception::real_##func = (func##_f)( \
|
||||
(::__interception::real_##func = (func##_type)( \
|
||||
unsigned long)::__interception::GetFuncAddrVer(#func, symver))
|
||||
#else
|
||||
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
|
||||
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
|
||||
#endif // !defined(__ANDROID__)
|
||||
#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS
|
||||
|
||||
#endif // INTERCEPTION_LINUX_H
|
||||
#endif // __linux__ || __FreeBSD__ || __NetBSD__
|
||||
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
|
||||
// SANITIZER_OPENBSD || SANITIZER_SOLARIS
|
||||
|
@ -10,9 +10,8 @@
|
||||
// Mac-specific interception methods.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include "interception.h"
|
||||
|
||||
#if SANITIZER_MAC
|
||||
|
||||
#endif // __APPLE__
|
||||
#endif // SANITIZER_MAC
|
||||
|
@ -10,7 +10,7 @@
|
||||
// Mac-specific interception methods.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifdef __APPLE__
|
||||
#if SANITIZER_MAC
|
||||
|
||||
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
|
||||
# error "interception_mac.h should be included from interception.h only"
|
||||
@ -23,4 +23,4 @@
|
||||
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
|
||||
|
||||
#endif // INTERCEPTION_MAC_H
|
||||
#endif // __APPLE__
|
||||
#endif // SANITIZER_MAC
|
||||
|
@ -10,9 +10,10 @@
|
||||
// Compile-time tests of the internal type definitions.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#if defined(__linux__) || defined(__APPLE__)
|
||||
|
||||
#include "interception.h"
|
||||
|
||||
#if SANITIZER_LINUX || SANITIZER_MAC
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
@ -22,14 +23,14 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
|
||||
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
|
||||
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
|
||||
|
||||
#ifndef __APPLE__
|
||||
#if !SANITIZER_MAC
|
||||
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
|
||||
#endif
|
||||
|
||||
// The following are the cases when pread (and friends) is used instead of
|
||||
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
|
||||
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
|
||||
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \
|
||||
# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
|
||||
_FILE_OFFSET_BITS != 64
|
||||
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
|
||||
# endif
|
||||
|
@ -123,9 +123,9 @@
|
||||
// addr2: .bytes <body>
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
#include "interception.h"
|
||||
|
||||
#if SANITIZER_WINDOWS
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
@ -221,8 +221,8 @@ static bool IsMemoryPadding(uptr address, uptr size) {
|
||||
return true;
|
||||
}
|
||||
|
||||
static const u8 kHintNop9Bytes[] = {
|
||||
0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
static const u8 kHintNop8Bytes[] = {
|
||||
0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
|
||||
};
|
||||
|
||||
template<class T>
|
||||
@ -237,8 +237,8 @@ static bool FunctionHasPrefix(uptr address, const T &pattern) {
|
||||
static bool FunctionHasPadding(uptr address, uptr size) {
|
||||
if (IsMemoryPadding(address - size, size))
|
||||
return true;
|
||||
if (size <= sizeof(kHintNop9Bytes) &&
|
||||
FunctionHasPrefix(address, kHintNop9Bytes))
|
||||
if (size <= sizeof(kHintNop8Bytes) &&
|
||||
FunctionHasPrefix(address, kHintNop8Bytes))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
@ -451,6 +451,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
||||
}
|
||||
|
||||
switch (*(u16*)(address)) {
|
||||
case 0x018A: // 8A 01 : mov al, byte ptr [ecx]
|
||||
case 0xFF8B: // 8B FF : mov edi, edi
|
||||
case 0xEC8B: // 8B EC : mov ebp, esp
|
||||
case 0xc889: // 89 C8 : mov eax, ecx
|
||||
@ -551,7 +552,10 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
|
||||
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
|
||||
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
|
||||
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
|
||||
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
|
||||
return 5;
|
||||
case 0x24648348: // 48 83 64 24 XX : and QWORD PTR [rsp + XX], YY
|
||||
return 6;
|
||||
}
|
||||
|
||||
#else
|
||||
@ -830,6 +834,7 @@ bool OverrideFunction(
|
||||
static void **InterestingDLLsAvailable() {
|
||||
static const char *InterestingDLLs[] = {
|
||||
"kernel32.dll",
|
||||
"msvcr100.dll", // VS2010
|
||||
"msvcr110.dll", // VS2012
|
||||
"msvcr120.dll", // VS2013
|
||||
"vcruntime140.dll", // VS2015
|
||||
@ -1007,4 +1012,4 @@ bool OverrideImportedFunction(const char *module_to_patch,
|
||||
|
||||
} // namespace __interception
|
||||
|
||||
#endif // _WIN32
|
||||
#endif // SANITIZER_MAC
|
||||
|
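
The kHintNop8Bytes hunk above swaps the recognized hot-patch padding from the 9-byte to the 8-byte hint NOP; a hedged, stand-alone sketch of the kind of prefix test involved (not the runtime's own helper).

// Sketch: does the 8-byte hint NOP (0F 1F 84 00 00 00 00 00) sit right
// before the function entry point? The byte pattern mirrors the hunk above.
#include <cstring>

static const unsigned char kHintNop8[] = {
    0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00};

bool HasHintNop8Prefix(const unsigned char *func_entry) {
  return std::memcmp(func_entry - sizeof(kHintNop8), kHintNop8,
                     sizeof(kHintNop8)) == 0;
}
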
@ -10,7 +10,7 @@
|
||||
// Windows-specific interception methods.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifdef _WIN32
|
||||
#if SANITIZER_WINDOWS
|
||||
|
||||
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
|
||||
# error "interception_win.h should be included from interception library only"
|
||||
@ -79,4 +79,4 @@ void TestOnlyReleaseTrampolineRegions();
|
||||
(::__interception::uptr *)&REAL(func))
|
||||
|
||||
#endif // INTERCEPTION_WIN_H
|
||||
#endif // _WIN32
|
||||
#endif // SANITIZER_WINDOWS
|
||||
|
@ -64,16 +64,17 @@ static void InitializeFlags() {
|
||||
if (Verbosity()) ReportUnrecognizedFlags();
|
||||
|
||||
if (common_flags()->help) parser.PrintFlagDescriptions();
|
||||
|
||||
__sanitizer_set_report_path(common_flags()->log_path);
|
||||
}
|
||||
|
||||
static void OnStackUnwind(const SignalContext &sig, const void *,
|
||||
BufferedStackTrace *stack) {
|
||||
GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
|
||||
sig.context,
|
||||
common_flags()->fast_unwind_on_fatal);
|
||||
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
|
||||
common_flags()->fast_unwind_on_fatal);
|
||||
}
|
||||
|
||||
void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
|
||||
static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
|
||||
HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
|
||||
nullptr);
|
||||
}
|
||||
|
@ -16,9 +16,8 @@
|
||||
|
||||
#define GET_STACK_TRACE(max_size, fast) \
|
||||
__sanitizer::BufferedStackTrace stack; \
|
||||
GetStackTraceWithPcBpAndContext(&stack, max_size, \
|
||||
StackTrace::GetCurrentPc(), \
|
||||
GET_CURRENT_FRAME(), nullptr, fast);
|
||||
GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
|
||||
GET_CURRENT_FRAME(), nullptr, fast);
|
||||
|
||||
#define GET_STACK_TRACE_FATAL \
|
||||
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
|
||||
@ -44,10 +43,9 @@ void ReplaceSystemMalloc();
|
||||
// The pc will be in the position 0 of the resulting stack trace.
|
||||
// The bp may refer to the current frame or to the caller's frame.
|
||||
ALWAYS_INLINE
|
||||
void GetStackTraceWithPcBpAndContext(__sanitizer::BufferedStackTrace *stack,
|
||||
__sanitizer::uptr max_depth,
|
||||
__sanitizer::uptr pc, __sanitizer::uptr bp,
|
||||
void *context, bool fast) {
|
||||
void GetStackTrace(__sanitizer::BufferedStackTrace *stack,
|
||||
__sanitizer::uptr max_depth, __sanitizer::uptr pc,
|
||||
__sanitizer::uptr bp, void *context, bool fast) {
|
||||
uptr stack_top = 0, stack_bottom = 0;
|
||||
ThreadContext *t;
|
||||
if (fast && (t = CurrentThreadContext())) {
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "sanitizer_common/sanitizer_allocator.h"
|
||||
#include "sanitizer_common/sanitizer_allocator_checks.h"
|
||||
#include "sanitizer_common/sanitizer_allocator_interface.h"
|
||||
#include "sanitizer_common/sanitizer_allocator_report.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
@ -68,15 +69,27 @@ static void RegisterDeallocation(void *p) {
|
||||
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
|
||||
}
|
||||
|
||||
static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
|
||||
if (AllocatorMayReturnNull()) {
|
||||
Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
|
||||
return nullptr;
|
||||
}
|
||||
ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
|
||||
}
|
||||
|
||||
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
|
||||
bool cleared) {
|
||||
if (size == 0)
|
||||
size = 1;
|
||||
if (size > kMaxAllowedMallocSize) {
|
||||
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
|
||||
return Allocator::FailureHandler::OnBadRequest();
|
||||
}
|
||||
if (size > kMaxAllowedMallocSize)
|
||||
return ReportAllocationSizeTooBig(size, stack);
|
||||
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
|
||||
if (UNLIKELY(!p)) {
|
||||
SetAllocatorOutOfMemory();
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportOutOfMemory(size, &stack);
|
||||
}
|
||||
// Do not rely on the allocator to clear the memory (it's slow).
|
||||
if (cleared && allocator.FromPrimary(p))
|
||||
memset(p, 0, size);
|
||||
@ -87,8 +100,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
|
||||
}
|
||||
|
||||
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
|
||||
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
|
||||
return Allocator::FailureHandler::OnBadRequest();
|
||||
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportCallocOverflow(nmemb, size, &stack);
|
||||
}
|
||||
size *= nmemb;
|
||||
return Allocate(stack, size, 1, true);
|
||||
}
|
||||
@ -104,9 +120,8 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
|
||||
uptr alignment) {
|
||||
RegisterDeallocation(p);
|
||||
if (new_size > kMaxAllowedMallocSize) {
|
||||
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
|
||||
allocator.Deallocate(GetAllocatorCache(), p);
|
||||
return Allocator::FailureHandler::OnBadRequest();
|
||||
return ReportAllocationSizeTooBig(new_size, stack);
|
||||
}
|
||||
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
|
||||
RegisterAllocation(stack, p, new_size);
|
||||
@ -124,10 +139,38 @@ uptr GetMallocUsableSize(const void *p) {
|
||||
return m->requested_size;
|
||||
}
|
||||
|
||||
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
||||
const StackTrace &stack) {
|
||||
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
|
||||
if (AllocatorMayReturnNull())
|
||||
return errno_EINVAL;
|
||||
ReportInvalidPosixMemalignAlignment(alignment, &stack);
|
||||
}
|
||||
void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
|
||||
if (UNLIKELY(!ptr))
|
||||
// OOM error is already taken care of by Allocate.
|
||||
return errno_ENOMEM;
|
||||
CHECK(IsAligned((uptr)ptr, alignment));
|
||||
*memptr = ptr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
|
||||
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
|
||||
errno = errno_EINVAL;
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
|
||||
}
|
||||
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
|
||||
}
|
||||
|
||||
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
|
||||
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
|
||||
errno = errno_EINVAL;
|
||||
return Allocator::FailureHandler::OnBadRequest();
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportInvalidAllocationAlignment(alignment, &stack);
|
||||
}
|
||||
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
|
||||
}
|
||||
@ -153,6 +196,19 @@ void *lsan_valloc(uptr size, const StackTrace &stack) {
|
||||
Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
|
||||
}
|
||||
|
||||
void *lsan_pvalloc(uptr size, const StackTrace &stack) {
|
||||
uptr PageSize = GetPageSizeCached();
|
||||
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
|
||||
errno = errno_ENOMEM;
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportPvallocOverflow(size, &stack);
|
||||
}
|
||||
// pvalloc(0) should allocate one page.
|
||||
size = size ? RoundUpTo(size, PageSize) : PageSize;
|
||||
return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
|
||||
}
|
||||
|
||||
uptr lsan_mz_size(const void *p) {
|
||||
return GetMallocUsableSize(p);
|
||||
}
|
||||
|
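
The allocation paths above keep calling CheckForCallocOverflow() because nmemb * size can wrap before the allocator ever sees the request; an equivalent stand-alone check, hedged (this mirrors the logic, not the runtime's exact implementation).

#include <cstddef>

// Sketch: reject calloc(nmemb, size) whose product cannot be represented,
// before performing the multiplication used for the actual allocation.
bool CallocWouldOverflow(size_t nmemb, size_t size) {
  if (nmemb == 0 || size == 0) return false;      // zero bytes never overflows
  return nmemb > static_cast<size_t>(-1) / size;  // nmemb * size > SIZE_MAX
}

// Usage mirroring Calloc() above: report or return null instead of multiplying.
//   if (CallocWouldOverflow(nmemb, size)) return nullptr;
//   return Allocate(stack, nmemb * size, 1, /*cleared=*/true);
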
@ -66,9 +66,16 @@ struct AP32 {
|
||||
};
|
||||
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
|
||||
#elif defined(__x86_64__) || defined(__powerpc64__)
|
||||
# if defined(__powerpc64__)
|
||||
const uptr kAllocatorSpace = 0xa0000000000ULL;
|
||||
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
|
||||
# else
|
||||
const uptr kAllocatorSpace = 0x600000000000ULL;
|
||||
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
|
||||
# endif
|
||||
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
|
||||
static const uptr kSpaceBeg = 0x600000000000ULL;
|
||||
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
|
||||
static const uptr kSpaceBeg = kAllocatorSpace;
|
||||
static const uptr kSpaceSize = kAllocatorSize;
|
||||
static const uptr kMetadataSize = sizeof(ChunkMetadata);
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
typedef NoOpMapUnmapCallback MapUnmapCallback;
|
||||
@ -81,12 +88,16 @@ typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
||||
|
||||
AllocatorCache *GetAllocatorCache();
|
||||
|
||||
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
||||
const StackTrace &stack);
|
||||
void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
|
||||
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
|
||||
void *lsan_malloc(uptr size, const StackTrace &stack);
|
||||
void lsan_free(void *p);
|
||||
void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
|
||||
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
|
||||
void *lsan_valloc(uptr size, const StackTrace &stack);
|
||||
void *lsan_pvalloc(uptr size, const StackTrace &stack);
|
||||
uptr lsan_mz_size(const void *p);
|
||||
|
||||
} // namespace __lsan
|
||||
|
@ -13,14 +13,15 @@
|
||||
#include "lsan_common.h"
|
||||
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
#include "sanitizer_common/sanitizer_flag_parser.h"
|
||||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
#include "sanitizer_common/sanitizer_placement_new.h"
|
||||
#include "sanitizer_common/sanitizer_procmaps.h"
|
||||
#include "sanitizer_common/sanitizer_report_decorator.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
#include "sanitizer_common/sanitizer_stacktrace.h"
|
||||
#include "sanitizer_common/sanitizer_suppressions.h"
|
||||
#include "sanitizer_common/sanitizer_report_decorator.h"
|
||||
#include "sanitizer_common/sanitizer_thread_registry.h"
|
||||
#include "sanitizer_common/sanitizer_tls_get_addr.h"
|
||||
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
@ -102,7 +103,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
|
||||
void InitializeRootRegions() {
|
||||
CHECK(!root_regions);
|
||||
ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
|
||||
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
|
||||
root_regions = new (placeholder) InternalMmapVector<RootRegion>(); // NOLINT
|
||||
}
|
||||
|
||||
const char *MaybeCallLsanDefaultOptions() {
|
||||
@ -212,9 +213,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
|
||||
// Scans thread data (stacks and TLS) for heap pointers.
|
||||
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
Frontier *frontier) {
|
||||
InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
|
||||
InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
|
||||
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
|
||||
uptr registers_end = registers_begin + registers.size();
|
||||
uptr registers_end =
|
||||
reinterpret_cast<uptr>(registers.data() + registers.size());
|
||||
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
|
||||
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
|
||||
LOG_THREADS("Processing thread %d.\n", os_id);
|
||||
@ -409,8 +411,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
|
||||
}
|
||||
}
|
||||
|
||||
// On Linux, handles dynamically allocated TLS blocks by treating all chunks
|
||||
// allocated from ld-linux.so as reachable.
|
||||
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
|
||||
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
|
||||
// modules accounting etc.
|
||||
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
|
||||
// They are allocated with a __libc_memalign() call in allocate_and_init()
|
||||
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
|
||||
@ -441,7 +444,7 @@ void ProcessPC(Frontier *frontier) {
|
||||
// Sets the appropriate tag on each chunk.
|
||||
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
|
||||
// Holds the flood fill frontier.
|
||||
Frontier frontier(1);
|
||||
Frontier frontier;
|
||||
|
||||
ForEachChunk(CollectIgnoredCb, &frontier);
|
||||
ProcessGlobalRegions(&frontier);
|
||||
@ -503,7 +506,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
|
||||
}
|
||||
|
||||
static void PrintMatchedSuppressions() {
|
||||
InternalMmapVector<Suppression *> matched(1);
|
||||
InternalMmapVector<Suppression *> matched;
|
||||
GetSuppressionContext()->GetMatched(&matched);
|
||||
if (!matched.size())
|
||||
return;
|
||||
@ -522,11 +525,36 @@ struct CheckForLeaksParam {
  LeakReport leak_report;
};

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
                                tctx->os_id, CompareLess<int>());
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report("Running thread %d was not suspended. False leaks are possible.\n",
             tctx->os_id);
  };
}

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
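
ReportIfNotSuspended() relies on the thread-id vector being sorted so that a binary search decides membership; a hedged equivalent of that test using the standard library instead of the runtime's InternalLowerBound()/CompareLess<> helpers.

#include <algorithm>
#include <cstdint>
#include <vector>

// Sketch: "was this running thread actually suspended?" over sorted ids.
bool WasSuspended(const std::vector<uint64_t> &sorted_suspended_tids,
                  uint64_t os_id) {
  auto it = std::lower_bound(sorted_suspended_tids.begin(),
                             sorted_suspended_tids.end(), os_id);
  return it != sorted_suspended_tids.end() && *it == os_id;
}
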
@ -681,7 +709,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
|
||||
uptr unsuppressed_count = UnsuppressedLeakCount();
|
||||
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
|
||||
Printf("The %zu top leak(s):\n", num_leaks_to_report);
|
||||
InternalSort(&leaks_, leaks_.size(), LeakComparator);
|
||||
Sort(leaks_.data(), leaks_.size(), &LeakComparator);
|
||||
uptr leaks_reported = 0;
|
||||
for (uptr i = 0; i < leaks_.size(); i++) {
|
||||
if (leaks_[i].is_suppressed) continue;
|
||||
|
@ -25,9 +25,9 @@
|
||||
// because of "small" (4 bytes) pointer size that leads to high false negative
|
||||
// ratio on large leaks. But we still want to have it for some 32 bit arches
|
||||
// (e.g. x86), see https://github.com/google/sanitizers/issues/403.
|
||||
// To enable LeakSanitizer on new architecture, one need to implement
|
||||
// internal_clone function as well as (probably) adjust TLS machinery for
|
||||
// new architecture inside sanitizer library.
|
||||
// To enable LeakSanitizer on a new architecture, one needs to implement the
|
||||
// internal_clone function as well as (probably) adjust the TLS machinery for
|
||||
// the new architecture inside the sanitizer library.
|
||||
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
|
||||
(SANITIZER_WORDSIZE == 64) && \
|
||||
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
|
||||
@ -45,6 +45,7 @@
|
||||
|
||||
namespace __sanitizer {
|
||||
class FlagParser;
|
||||
class ThreadRegistry;
|
||||
struct DTLS;
|
||||
}
|
||||
|
||||
@ -93,7 +94,7 @@ struct LeakedObject {
|
||||
// Aggregates leaks by stack trace prefix.
|
||||
class LeakReport {
|
||||
public:
|
||||
LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
|
||||
LeakReport() {}
|
||||
void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
|
||||
ChunkTag tag);
|
||||
void ReportTopLeaks(uptr max_leaks);
|
||||
@ -101,12 +102,11 @@ class LeakReport {
|
||||
void ApplySuppressions();
|
||||
uptr UnsuppressedLeakCount();
|
||||
|
||||
|
||||
private:
|
||||
void PrintReportForLeak(uptr index);
|
||||
void PrintLeakedObjectsForLeak(uptr index);
|
||||
|
||||
u32 next_id_;
|
||||
u32 next_id_ = 0;
|
||||
InternalMmapVector<Leak> leaks_;
|
||||
InternalMmapVector<LeakedObject> leaked_objects_;
|
||||
};
|
||||
@ -203,6 +203,7 @@ bool WordIsPoisoned(uptr addr);
|
||||
// Wrappers for ThreadRegistry access.
|
||||
void LockThreadRegistry();
|
||||
void UnlockThreadRegistry();
|
||||
ThreadRegistry *GetThreadRegistryLocked();
|
||||
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
|
||||
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
|
||||
uptr *cache_end, DTLS **dtls);
|
||||
|
@ -18,6 +18,7 @@
|
||||
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
#include "sanitizer_common/sanitizer_getauxval.h"
|
||||
#include "sanitizer_common/sanitizer_linux.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
|
||||
@ -28,8 +29,12 @@ static const char kLinkerName[] = "ld";
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;

static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
static bool IsLinker(const LoadedModule& module) {
#if SANITIZER_USE_GETAUXVAL
  return module.base_address() == getauxval(AT_BASE);
#else
  return LibraryNameIs(module.full_name(), kLinkerName);
#endif  // SANITIZER_USE_GETAUXVAL
}

__attribute__((tls_model("initial-exec")))
@ -47,22 +52,25 @@ void InitializePlatformSpecificModules() {
|
||||
ListOfModules modules;
|
||||
modules.init();
|
||||
for (LoadedModule &module : modules) {
|
||||
if (!IsLinker(module.full_name())) continue;
|
||||
if (!IsLinker(module))
|
||||
continue;
|
||||
if (linker == nullptr) {
|
||||
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
|
||||
*linker = module;
|
||||
module = LoadedModule();
|
||||
} else {
|
||||
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
|
||||
"TLS will not be handled correctly.\n", kLinkerName);
|
||||
"TLS and other allocations originating from linker might be "
|
||||
"falsely reported as leaks.\n", kLinkerName);
|
||||
linker->clear();
|
||||
linker = nullptr;
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (linker == nullptr) {
|
||||
VReport(1, "LeakSanitizer: Dynamic linker not found. "
|
||||
"TLS will not be handled correctly.\n");
|
||||
VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
|
||||
"allocations originating from linker might be falsely reported "
|
||||
"as leaks.\n");
|
||||
}
|
||||
}
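
Matching the dynamic linker by its load address via getauxval(AT_BASE) is more robust than matching by module name; a hedged stand-alone sketch of the same idea, assuming a Linux/glibc target where SANITIZER_USE_GETAUXVAL would be true.

#include <sys/auxv.h>
#include <cstdint>

// Sketch: compare a module's base address against the address at which the
// kernel mapped the ELF interpreter (ld.so), as IsLinker() now does.
bool ModuleIsDynamicLinker(uintptr_t module_base_address) {
  // AT_BASE is 0 for statically linked executables (no interpreter mapped).
  uintptr_t interp_base = static_cast<uintptr_t>(getauxval(AT_BASE));
  return interp_base != 0 && module_base_address == interp_base;
}
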
@ -117,7 +117,8 @@ void ProcessGlobalRegions(Frontier *frontier) {
|
||||
for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName);
|
||||
|
||||
MemoryMappingLayout memory_mapping(false);
|
||||
InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
|
||||
InternalMmapVector<LoadedModule> modules;
|
||||
modules.reserve(128);
|
||||
memory_mapping.DumpListOfModules(&modules);
|
||||
for (uptr i = 0; i < modules.size(); ++i) {
|
||||
// Even when global scanning is disabled, we still need to scan
|
||||
@ -139,12 +140,6 @@ void ProcessGlobalRegions(Frontier *frontier) {
|
||||
}
|
||||
|
||||
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
||||
mach_port_name_t port;
|
||||
if (task_for_pid(mach_task_self(), internal_getpid(), &port)
|
||||
!= KERN_SUCCESS) {
|
||||
return;
|
||||
}
|
||||
|
||||
unsigned depth = 1;
|
||||
vm_size_t size = 0;
|
||||
vm_address_t address = 0;
|
||||
@ -155,7 +150,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
||||
|
||||
while (err == KERN_SUCCESS) {
|
||||
struct vm_region_submap_info_64 info;
|
||||
err = vm_region_recurse_64(port, &address, &size, &depth,
|
||||
err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
|
||||
(vm_region_info_t)&info, &count);
|
||||
|
||||
uptr end_address = address + size;
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_common/sanitizer_allocator.h"
|
||||
#include "sanitizer_common/sanitizer_allocator_report.h"
|
||||
#include "sanitizer_common/sanitizer_atomic.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_flags.h"
|
||||
@ -84,9 +85,7 @@ INTERCEPTOR(void*, realloc, void *q, uptr size) {
|
||||
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
|
||||
ENSURE_LSAN_INITED;
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
*memptr = lsan_memalign(alignment, size, stack);
|
||||
// FIXME: Return ENOMEM if user requested more than max alloc size.
|
||||
return 0;
|
||||
return lsan_posix_memalign(memptr, alignment, size, stack);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void*, valloc, uptr size) {
|
||||
@ -121,7 +120,7 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
|
||||
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
|
||||
ENSURE_LSAN_INITED;
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
return lsan_memalign(alignment, size, stack);
|
||||
return lsan_aligned_alloc(alignment, size, stack);
|
||||
}
|
||||
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
|
||||
#else
|
||||
@ -164,13 +163,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
|
||||
INTERCEPTOR(void*, pvalloc, uptr size) {
|
||||
ENSURE_LSAN_INITED;
|
||||
GET_STACK_TRACE_MALLOC;
|
||||
uptr PageSize = GetPageSizeCached();
|
||||
size = RoundUpTo(size, PageSize);
|
||||
if (size == 0) {
|
||||
// pvalloc(0) should allocate one page.
|
||||
size = PageSize;
|
||||
}
|
||||
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
|
||||
return lsan_pvalloc(size, stack);
|
||||
}
|
||||
#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
|
||||
#else
|
||||
@ -200,21 +193,21 @@ INTERCEPTOR(int, mprobe, void *ptr) {
|
||||
|
||||
|
||||
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
|
||||
#define OPERATOR_NEW_BODY(nothrow) \
|
||||
ENSURE_LSAN_INITED; \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *res = lsan_malloc(size, stack); \
|
||||
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
|
||||
#define OPERATOR_NEW_BODY(nothrow)\
|
||||
ENSURE_LSAN_INITED;\
|
||||
GET_STACK_TRACE_MALLOC;\
|
||||
void *res = lsan_malloc(size, stack);\
|
||||
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
|
||||
return res;
|
||||
#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
|
||||
ENSURE_LSAN_INITED; \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *res = lsan_memalign((uptr)align, size, stack); \
|
||||
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
|
||||
#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
|
||||
ENSURE_LSAN_INITED;\
|
||||
GET_STACK_TRACE_MALLOC;\
|
||||
void *res = lsan_memalign((uptr)align, size, stack);\
|
||||
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
|
||||
return res;
|
||||
|
||||
#define OPERATOR_DELETE_BODY \
|
||||
ENSURE_LSAN_INITED; \
|
||||
#define OPERATOR_DELETE_BODY\
|
||||
ENSURE_LSAN_INITED;\
|
||||
lsan_free(ptr);
|
||||
|
||||
// On OS X it's not enough to just provide our own 'operator new' and
|
||||
@ -307,6 +300,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
|
||||
|
||||
///// Thread initialization and finalization. /////
|
||||
|
||||
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
|
||||
static unsigned g_thread_finalize_key;
|
||||
|
||||
static void thread_finalize(void *v) {
|
||||
@ -320,6 +314,29 @@ static void thread_finalize(void *v) {
|
||||
}
|
||||
ThreadFinish();
|
||||
}
|
||||
#endif
|
||||
|
||||
#if SANITIZER_NETBSD
|
||||
INTERCEPTOR(void, _lwp_exit) {
|
||||
ENSURE_LSAN_INITED;
|
||||
ThreadFinish();
|
||||
REAL(_lwp_exit)();
|
||||
}
|
||||
#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
|
||||
#else
|
||||
#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_THR_EXIT
|
||||
INTERCEPTOR(void, thr_exit, tid_t *state) {
|
||||
ENSURE_LSAN_INITED;
|
||||
ThreadFinish();
|
||||
REAL(thr_exit)(state);
|
||||
}
|
||||
#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
|
||||
#else
|
||||
#define LSAN_MAYBE_INTERCEPT_THR_EXIT
|
||||
#endif
|
||||
|
||||
struct ThreadParam {
|
||||
void *(*callback)(void *arg);
|
||||
@ -333,11 +350,13 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
|
||||
void *param = p->param;
|
||||
// Wait until the last iteration to maximize the chance that we are the last
|
||||
// destructor to run.
|
||||
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
|
||||
if (pthread_setspecific(g_thread_finalize_key,
|
||||
(void*)GetPthreadDestructorIterations())) {
|
||||
Report("LeakSanitizer: failed to set thread key.\n");
|
||||
Die();
|
||||
}
|
||||
#endif
|
||||
int tid = 0;
|
||||
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
|
||||
internal_sched_yield();
|
||||
@ -425,10 +444,15 @@ void InitializeInterceptors() {
|
||||
INTERCEPT_FUNCTION(pthread_join);
|
||||
INTERCEPT_FUNCTION(_exit);
|
||||
|
||||
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
|
||||
LSAN_MAYBE_INTERCEPT_THR_EXIT;
|
||||
|
||||
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
|
||||
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
|
||||
Report("LeakSanitizer: failed to create thread key.\n");
|
||||
Die();
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace __lsan
|
||||
|
@ -35,6 +35,9 @@ using namespace __lsan;
|
||||
#define COMMON_MALLOC_CALLOC(count, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = lsan_calloc(count, size, stack)
|
||||
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
int res = lsan_posix_memalign(memptr, alignment, size, stack)
|
||||
#define COMMON_MALLOC_VALLOC(size) \
|
||||
GET_STACK_TRACE_MALLOC; \
|
||||
void *p = lsan_valloc(size, stack)
|
||||
|
@ -153,4 +153,9 @@ void UnlockThreadRegistry() {
|
||||
thread_registry->Unlock();
|
||||
}
|
||||
|
||||
ThreadRegistry *GetThreadRegistryLocked() {
|
||||
thread_registry->CheckLocked();
|
||||
return thread_registry;
|
||||
}
|
||||
|
||||
} // namespace __lsan
|
||||
|
libsanitizer/sanitizer_common/sancov_begin.S (new file, 5 lines)
@ -0,0 +1,5 @@
	.type __start___sancov_guards,@object
	.globl __start___sancov_guards
	.section __sancov_guards,"aw",@progbits
	.p2align 2
__start___sancov_guards:

libsanitizer/sanitizer_common/sancov_end.S (new file, 5 lines)
@ -0,0 +1,5 @@
	.type __stop___sancov_guards,@object
	.globl __stop___sancov_guards
	.section __sancov_guards,"aw",@progbits
	.p2align 2
__stop___sancov_guards:
@ -19,6 +19,10 @@
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// Default allocator names.
|
||||
const char *PrimaryAllocatorName = "SizeClassAllocator";
|
||||
const char *SecondaryAllocatorName = "LargeMmapAllocator";
|
||||
|
||||
// ThreadSanitizer for Go uses libc malloc/free.
|
||||
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
|
||||
# if SANITIZER_LINUX && !SANITIZER_ANDROID
|
||||
@ -134,12 +138,19 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
|
||||
|
||||
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
|
||||
|
||||
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
|
||||
SetAllocatorOutOfMemory();
|
||||
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
|
||||
"0x%zx bytes\n", SanitizerToolName, requested_size);
|
||||
Die();
|
||||
}
|
||||
|
||||
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
|
||||
if (size + sizeof(u64) < size)
|
||||
return nullptr;
|
||||
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
|
||||
if (!p)
|
||||
return nullptr;
|
||||
if (UNLIKELY(!p))
|
||||
ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
|
||||
((u64*)p)[0] = kBlockMagic;
|
||||
return (char*)p + sizeof(u64);
|
||||
}
|
||||
@ -153,16 +164,21 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
|
||||
size = size + sizeof(u64);
|
||||
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
|
||||
void *p = RawInternalRealloc(addr, size, cache);
|
||||
if (!p)
|
||||
return nullptr;
|
||||
if (UNLIKELY(!p))
|
||||
ReportInternalAllocatorOutOfMemory(size);
|
||||
return (char*)p + sizeof(u64);
|
||||
}
|
||||
|
||||
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
|
||||
if (UNLIKELY(CheckForCallocOverflow(count, size)))
|
||||
return InternalAllocator::FailureHandler::OnBadRequest();
|
||||
if (UNLIKELY(CheckForCallocOverflow(count, size))) {
|
||||
Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
|
||||
"cannot be represented in type size_t\n", SanitizerToolName, count,
|
||||
size);
|
||||
Die();
|
||||
}
|
||||
void *p = InternalAlloc(count * size, cache);
|
||||
if (p) internal_memset(p, 0, count * size);
|
||||
if (LIKELY(p))
|
||||
internal_memset(p, 0, count * size);
|
||||
return p;
|
||||
}
|
||||
|
||||
@ -176,11 +192,13 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
|
||||
}
|
||||
|
||||
// LowLevelAllocator
|
||||
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
|
||||
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
|
||||
static LowLevelAllocateCallback low_level_alloc_callback;
|
||||
|
||||
void *LowLevelAllocator::Allocate(uptr size) {
|
||||
// Align allocation size.
|
||||
size = RoundUpTo(size, 8);
|
||||
size = RoundUpTo(size, low_level_alloc_min_alignment);
|
||||
if (allocated_end_ - allocated_current_ < (sptr)size) {
|
||||
uptr size_to_allocate = Max(size, GetPageSizeCached());
|
||||
allocated_current_ =
|
||||
@ -197,10 +215,17 @@ void *LowLevelAllocator::Allocate(uptr size) {
|
||||
return res;
|
||||
}
|
||||
|
||||
void SetLowLevelAllocateMinAlignment(uptr alignment) {
|
||||
CHECK(IsPowerOfTwo(alignment));
|
||||
low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
|
||||
}
|
||||
|
||||
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
|
||||
low_level_alloc_callback = callback;
|
||||
}
|
||||
|
||||
// Allocator's OOM and other errors handling support.
|
||||
|
||||
static atomic_uint8_t allocator_out_of_memory = {0};
|
||||
static atomic_uint8_t allocator_may_return_null = {0};
|
||||
|
||||
@ -208,13 +233,8 @@ bool IsAllocatorOutOfMemory() {
|
||||
return atomic_load_relaxed(&allocator_out_of_memory);
|
||||
}
|
||||
|
||||
// Prints error message and kills the program.
|
||||
void NORETURN ReportAllocatorCannotReturnNull() {
|
||||
Report("%s's allocator is terminating the process instead of returning 0\n",
|
||||
SanitizerToolName);
|
||||
Report("If you don't like this behavior set allocator_may_return_null=1\n");
|
||||
CHECK(0);
|
||||
Die();
|
||||
void SetAllocatorOutOfMemory() {
|
||||
atomic_store_relaxed(&allocator_out_of_memory, 1);
|
||||
}
|
||||
|
||||
bool AllocatorMayReturnNull() {
|
||||
@ -226,26 +246,9 @@ void SetAllocatorMayReturnNull(bool may_return_null) {
|
||||
memory_order_relaxed);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnFailure::OnBadRequest() {
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnFailure::OnOOM() {
|
||||
atomic_store_relaxed(&allocator_out_of_memory, 1);
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
void NORETURN *DieOnFailure::OnBadRequest() {
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
void NORETURN *DieOnFailure::OnOOM() {
|
||||
atomic_store_relaxed(&allocator_out_of_memory, 1);
|
||||
ReportAllocatorCannotReturnNull();
|
||||
void PrintHintAllocatorCannotReturnNull() {
|
||||
Report("HINT: if you don't care about these errors you may set "
|
||||
"allocator_may_return_null=1\n");
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
@ -22,28 +22,23 @@
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// Allows the tools to name their allocations appropriately.
|
||||
extern const char *PrimaryAllocatorName;
|
||||
extern const char *SecondaryAllocatorName;
|
||||
|
||||
// Since flags are immutable and allocator behavior can be changed at runtime
|
||||
// (unit tests or ASan on Android are some examples), allocator_may_return_null
|
||||
// flag value is cached here and can be altered later.
|
||||
bool AllocatorMayReturnNull();
|
||||
void SetAllocatorMayReturnNull(bool may_return_null);
|
||||
|
||||
// Allocator failure handling policies:
|
||||
// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
|
||||
// dies otherwise.
|
||||
struct ReturnNullOrDieOnFailure {
|
||||
static void *OnBadRequest();
|
||||
static void *OnOOM();
|
||||
};
|
||||
// Always dies on the failure.
|
||||
struct DieOnFailure {
|
||||
static void NORETURN *OnBadRequest();
|
||||
static void NORETURN *OnOOM();
|
||||
};
|
||||
|
||||
// Returns true if allocator detected OOM condition. Can be used to avoid memory
|
||||
// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
|
||||
// hungry operations.
|
||||
bool IsAllocatorOutOfMemory();
|
||||
// Should be called by a particular allocator when OOM is detected.
|
||||
void SetAllocatorOutOfMemory();
|
||||
|
||||
void PrintHintAllocatorCannotReturnNull();
|
||||
|
||||
// Allocators call these callbacks on mmap/munmap.
|
||||
struct NoOpMapUnmapCallback {
|
||||
@ -54,6 +49,21 @@ struct NoOpMapUnmapCallback {
|
||||
// Callback type for iterating over chunks.
|
||||
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
|
||||
|
||||
INLINE u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
  return (*state = *state * 1103515245 + 12345) >> 16;
}

INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)

template<typename T>
INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
  if (n <= 1) return;
  u32 state = *rand_state;
  for (u32 i = n - 1; i > 0; i--)
    Swap(a[i], a[RandN(&state, i + 1)]);
  *rand_state = state;
}
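
A small usage sketch of the helpers above, hedged; it assumes the Rand()/RandN()/RandomShuffle() definitions just shown and an arbitrary seed.

// Sketch: randomize the order of a batch of chunk indices, as an allocator
// can do when populating a free list.
void ShuffleChunkIds(u32 *chunk_ids, u32 n, u32 *seed) {
  for (u32 i = 0; i < n; i++) chunk_ids[i] = i;
  RandomShuffle(chunk_ids, n, seed);  // Fisher-Yates driven by Rand()/RandN()
}
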
|
||||
|
||||
#include "sanitizer_allocator_size_class_map.h"
|
||||
#include "sanitizer_allocator_stats.h"
|
||||
#include "sanitizer_allocator_primary64.h"
|
||||
|
@ -16,7 +16,7 @@
|
||||
template<u64 kSize>
|
||||
class FlatByteMap {
|
||||
public:
|
||||
void TestOnlyInit() {
|
||||
void Init() {
|
||||
internal_memset(map_, 0, sizeof(map_));
|
||||
}
|
||||
|
||||
@ -42,7 +42,7 @@ class FlatByteMap {
|
||||
template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
|
||||
class TwoLevelByteMap {
|
||||
public:
|
||||
void TestOnlyInit() {
|
||||
void Init() {
|
||||
internal_memset(map1_, 0, sizeof(map1_));
|
||||
mu_.Init();
|
||||
}
|
||||
|
@ -42,16 +42,18 @@ INLINE void *SetErrnoOnNull(void *ptr) {
|
||||
// of alignment.
|
||||
INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
|
||||
#if SANITIZER_POSIX
|
||||
return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0;
|
||||
return alignment != 0 && IsPowerOfTwo(alignment) &&
|
||||
(size & (alignment - 1)) == 0;
|
||||
#else
|
||||
return size % alignment == 0;
|
||||
return alignment != 0 && size % alignment == 0;
|
||||
#endif
|
||||
}
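
In concrete terms, the tightened check now also rejects a zero alignment; a stand-alone illustration of which aligned_alloc()-style arguments pass, hedged (the predicate below mirrors the POSIX branch, it is not the runtime's own symbol).

#include <cassert>
#include <cstddef>

bool ValidAlignedAllocArgs(size_t alignment, size_t size) {
  bool power_of_two = alignment != 0 && (alignment & (alignment - 1)) == 0;
  return power_of_two && (size & (alignment - 1)) == 0;
}

int main() {
  assert(ValidAlignedAllocArgs(64, 128));
  assert(!ValidAlignedAllocArgs(64, 100));  // size not a multiple of alignment
  assert(!ValidAlignedAllocArgs(24, 48));   // alignment not a power of two
  assert(!ValidAlignedAllocArgs(0, 0));     // the newly rejected zero alignment
  return 0;
}
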
|
||||
|
||||
// Checks posix_memalign() parameters, verifies that alignment is a power of two
|
||||
// and a multiple of sizeof(void *).
|
||||
INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
|
||||
return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT
|
||||
return alignment != 0 && IsPowerOfTwo(alignment) &&
|
||||
(alignment % sizeof(void *)) == 0; // NOLINT
|
||||
}
|
||||
|
||||
// Returns true if calloc(size, n) call overflows on size*n calculation.
|
||||
|
@ -22,8 +22,6 @@ template <class PrimaryAllocator, class AllocatorCache,
|
||||
class SecondaryAllocator> // NOLINT
|
||||
class CombinedAllocator {
|
||||
public:
|
||||
typedef typename SecondaryAllocator::FailureHandler FailureHandler;
|
||||
|
||||
void InitLinkerInitialized(s32 release_to_os_interval_ms) {
|
||||
primary_.Init(release_to_os_interval_ms);
|
||||
secondary_.InitLinkerInitialized();
|
||||
@ -40,8 +38,12 @@ class CombinedAllocator {
|
||||
// Returning 0 on malloc(0) may break a lot of code.
|
||||
if (size == 0)
|
||||
size = 1;
|
||||
if (size + alignment < size)
|
||||
return FailureHandler::OnBadRequest();
|
||||
if (size + alignment < size) {
|
||||
Report("WARNING: %s: CombinedAllocator allocation overflow: "
|
||||
"0x%zx bytes with 0x%zx alignment requested\n",
|
||||
SanitizerToolName, size, alignment);
|
||||
return nullptr;
|
||||
}
|
||||
uptr original_size = size;
|
||||
// If alignment requirements are to be fulfilled by the frontend allocator
|
||||
// rather than by the primary or secondary, passing an alignment lower than
|
||||
@ -60,8 +62,6 @@ class CombinedAllocator {
|
||||
res = cache->Allocate(&primary_, primary_.ClassID(size));
|
||||
else
|
||||
res = secondary_.Allocate(&stats_, original_size, alignment);
|
||||
if (!res)
|
||||
return FailureHandler::OnOOM();
|
||||
if (alignment > 8)
|
||||
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
|
||||
return res;
|
||||
@ -75,6 +75,10 @@ class CombinedAllocator {
|
||||
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
|
||||
}
|
||||
|
||||
void ForceReleaseToOS() {
|
||||
primary_.ForceReleaseToOS();
|
||||
}
|
||||
|
||||
void Deallocate(AllocatorCache *cache, void *p) {
|
||||
if (!p) return;
|
||||
if (primary_.PointerIsMine(p))
|
||||
|
@ -36,6 +36,9 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
||||
void __sanitizer_free_hook(void *ptr);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_purge_allocator();
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
|
||||
} // extern "C"
|
||||
|
@ -44,9 +44,12 @@ typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
|
||||
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
|
||||
InternalAllocatorCache;
|
||||
|
||||
typedef LargeMmapAllocator<NoOpMapUnmapCallback,
|
||||
LargeMmapAllocatorPtrArrayStatic>
|
||||
SecondaryInternalAllocator;
|
||||
|
||||
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
|
||||
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
|
||||
> InternalAllocator;
|
||||
SecondaryInternalAllocator> InternalAllocator;
|
||||
|
||||
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
|
||||
uptr alignment = 0);
|
||||
@ -57,15 +60,6 @@ void *InternalCalloc(uptr countr, uptr size,
|
||||
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
|
||||
InternalAllocator *internal_allocator();
|
||||
|
||||
enum InternalAllocEnum {
|
||||
INTERNAL_ALLOC
|
||||
};
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
inline void *operator new(__sanitizer::operator_new_size_type size,
|
||||
__sanitizer::InternalAllocEnum) {
|
||||
return __sanitizer::InternalAlloc(size);
|
||||
}
|
||||
|
||||
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
|
||||
|
@ -17,8 +17,7 @@
|
||||
// object per thread in TLS, is has to be POD.
|
||||
template<class SizeClassAllocator>
|
||||
struct SizeClassAllocatorLocalCache
|
||||
: SizeClassAllocator::AllocatorCache {
|
||||
};
|
||||
: SizeClassAllocator::AllocatorCache {};
|
||||
|
||||
// Cache used by SizeClassAllocator64.
|
||||
template <class SizeClassAllocator>
|
||||
@ -44,13 +43,12 @@ struct SizeClassAllocator64LocalCache {
|
||||
if (UNLIKELY(c->count == 0)) {
|
||||
if (UNLIKELY(!Refill(c, allocator, class_id)))
|
||||
return nullptr;
|
||||
DCHECK_GT(c->count, 0);
|
||||
}
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
CHECK_GT(c->count, 0);
|
||||
CompactPtrT chunk = c->chunks[--c->count];
|
||||
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
|
||||
allocator->GetRegionBeginBySizeClass(class_id), chunk));
|
||||
return res;
|
||||
}
|
||||
|
||||
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
|
||||
@ -58,20 +56,19 @@ struct SizeClassAllocator64LocalCache {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
// If the first allocator call on a new thread is a deallocation, then
|
||||
// max_count will be zero, leading to check failure.
|
||||
InitCache();
|
||||
PerClass *c = &per_class_[class_id];
|
||||
stats_.Sub(AllocatorStatAllocated, c->class_size);
|
||||
CHECK_NE(c->max_count, 0UL);
|
||||
InitCache(c);
|
||||
if (UNLIKELY(c->count == c->max_count))
|
||||
Drain(c, allocator, class_id, c->max_count / 2);
|
||||
CompactPtrT chunk = allocator->PointerToCompactPtr(
|
||||
allocator->GetRegionBeginBySizeClass(class_id),
|
||||
reinterpret_cast<uptr>(p));
|
||||
c->chunks[c->count++] = chunk;
|
||||
stats_.Sub(AllocatorStatAllocated, c->class_size);
|
||||
}
|
||||
|
||||
void Drain(SizeClassAllocator *allocator) {
|
||||
for (uptr i = 0; i < kNumClasses; i++) {
|
||||
for (uptr i = 1; i < kNumClasses; i++) {
|
||||
PerClass *c = &per_class_[i];
|
||||
while (c->count > 0)
|
||||
Drain(c, allocator, i, c->count);
|
||||
@ -92,20 +89,22 @@ struct SizeClassAllocator64LocalCache {
|
||||
PerClass per_class_[kNumClasses];
|
||||
AllocatorStats stats_;
|
||||
|
||||
void InitCache() {
|
||||
if (LIKELY(per_class_[1].max_count))
|
||||
void InitCache(PerClass *c) {
|
||||
if (LIKELY(c->max_count))
|
||||
return;
|
||||
for (uptr i = 0; i < kNumClasses; i++) {
|
||||
for (uptr i = 1; i < kNumClasses; i++) {
|
||||
PerClass *c = &per_class_[i];
|
||||
c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
|
||||
c->class_size = Allocator::ClassIdToSize(i);
|
||||
const uptr size = Allocator::ClassIdToSize(i);
|
||||
c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
|
||||
c->class_size = size;
|
||||
}
|
||||
DCHECK_NE(c->max_count, 0UL);
|
||||
}
|
||||
|
||||
NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
|
||||
uptr class_id) {
|
||||
InitCache();
|
||||
uptr num_requested_chunks = c->max_count / 2;
|
||||
InitCache(c);
|
||||
const uptr num_requested_chunks = c->max_count / 2;
|
||||
if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
|
||||
num_requested_chunks)))
|
||||
return false;
|
||||
@ -115,9 +114,8 @@ struct SizeClassAllocator64LocalCache {
|
||||
|
||||
NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
|
||||
uptr count) {
|
||||
InitCache();
|
||||
CHECK_GE(c->count, count);
|
||||
uptr first_idx_to_drain = c->count - count;
|
||||
const uptr first_idx_to_drain = c->count - count;
|
||||
c->count -= count;
|
||||
allocator->ReturnToAllocator(&stats_, class_id,
|
||||
&c->chunks[first_idx_to_drain], count);
|
||||
@ -162,12 +160,13 @@ struct SizeClassAllocator32LocalCache {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
PerClass *c = &per_class_[class_id];
|
||||
if (UNLIKELY(c->count == 0)) {
|
||||
if (UNLIKELY(!Refill(allocator, class_id)))
|
||||
if (UNLIKELY(!Refill(c, allocator, class_id)))
|
||||
return nullptr;
|
||||
DCHECK_GT(c->count, 0);
|
||||
}
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
void *res = c->batch[--c->count];
|
||||
PREFETCH(c->batch[c->count - 1]);
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
return res;
|
||||
}
|
||||
|
||||
@ -176,20 +175,19 @@ struct SizeClassAllocator32LocalCache {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
// If the first allocator call on a new thread is a deallocation, then
|
||||
// max_count will be zero, leading to check failure.
|
||||
InitCache();
|
||||
PerClass *c = &per_class_[class_id];
|
||||
stats_.Sub(AllocatorStatAllocated, c->class_size);
|
||||
CHECK_NE(c->max_count, 0UL);
|
||||
InitCache(c);
|
||||
if (UNLIKELY(c->count == c->max_count))
|
||||
Drain(allocator, class_id);
|
||||
Drain(c, allocator, class_id);
|
||||
c->batch[c->count++] = p;
|
||||
stats_.Sub(AllocatorStatAllocated, c->class_size);
|
||||
}
|
||||
|
||||
void Drain(SizeClassAllocator *allocator) {
|
||||
for (uptr i = 0; i < kNumClasses; i++) {
|
||||
for (uptr i = 1; i < kNumClasses; i++) {
|
||||
PerClass *c = &per_class_[i];
|
||||
while (c->count > 0)
|
||||
Drain(allocator, i);
|
||||
Drain(c, allocator, i);
|
||||
}
|
||||
}
|
||||
|
||||
@ -214,15 +212,16 @@ struct SizeClassAllocator32LocalCache {
|
||||
PerClass per_class_[kNumClasses];
|
||||
AllocatorStats stats_;
|
||||
|
||||
void InitCache() {
|
||||
if (LIKELY(per_class_[1].max_count))
|
||||
void InitCache(PerClass *c) {
|
||||
if (LIKELY(c->max_count))
|
||||
return;
|
||||
const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
|
||||
for (uptr i = 0; i < kNumClasses; i++) {
|
||||
for (uptr i = 1; i < kNumClasses; i++) {
|
||||
PerClass *c = &per_class_[i];
|
||||
uptr max_cached = TransferBatch::MaxCached(i);
|
||||
const uptr size = Allocator::ClassIdToSize(i);
|
||||
const uptr max_cached = TransferBatch::MaxCached(size);
|
||||
c->max_count = 2 * max_cached;
|
||||
c->class_size = Allocator::ClassIdToSize(i);
|
||||
c->class_size = size;
|
||||
// Precompute the class id to use to store batches for the current class
|
||||
// id. 0 means the class size is large enough to store a batch within one
|
||||
// of the chunks. If using a separate size class, it will always be
|
||||
@ -230,16 +229,17 @@ struct SizeClassAllocator32LocalCache {
|
||||
if (kUseSeparateSizeClassForBatch) {
|
||||
c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
|
||||
} else {
|
||||
c->batch_class_id = (c->class_size <
|
||||
c->batch_class_id = (size <
|
||||
TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
|
||||
batch_class_id : 0;
|
||||
}
|
||||
}
|
||||
DCHECK_NE(c->max_count, 0UL);
|
||||
}
|
||||
|
||||
NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
|
||||
InitCache();
|
||||
PerClass *c = &per_class_[class_id];
|
||||
NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
|
||||
uptr class_id) {
|
||||
InitCache(c);
|
||||
TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
|
||||
if (UNLIKELY(!b))
|
||||
return false;
|
||||
@ -250,20 +250,21 @@ struct SizeClassAllocator32LocalCache {
|
||||
return true;
|
||||
}
|
||||
|
||||
NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
|
||||
InitCache();
|
||||
PerClass *c = &per_class_[class_id];
|
||||
uptr cnt = Min(c->max_count / 2, c->count);
|
||||
uptr first_idx_to_drain = c->count - cnt;
|
||||
NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
|
||||
uptr class_id) {
|
||||
const uptr count = Min(c->max_count / 2, c->count);
|
||||
const uptr first_idx_to_drain = c->count - count;
|
||||
TransferBatch *b = CreateBatch(
|
||||
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
|
||||
// Failure to allocate a batch while releasing memory is non recoverable.
|
||||
// TODO(alekseys): Figure out how to do it without allocating a new batch.
|
||||
if (UNLIKELY(!b))
|
||||
DieOnFailure::OnOOM();
|
||||
b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
|
||||
&c->batch[first_idx_to_drain], cnt);
|
||||
c->count -= cnt;
|
||||
if (UNLIKELY(!b)) {
|
||||
Report("FATAL: Internal error: %s's allocator failed to allocate a "
|
||||
"transfer batch.\n", SanitizerToolName);
|
||||
Die();
|
||||
}
|
||||
b->SetFromArray(&c->batch[first_idx_to_drain], count);
|
||||
c->count -= count;
|
||||
allocator->DeallocateBatch(&stats_, class_id, b);
|
||||
}
|
||||
};
|
||||
|
@ -61,9 +61,9 @@ class SizeClassAllocator32 {
|
||||
|
||||
struct TransferBatch {
|
||||
static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
|
||||
void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
|
||||
void SetFromArray(void *batch[], uptr count) {
|
||||
DCHECK_LE(count, kMaxNumCached);
|
||||
count_ = count;
|
||||
CHECK_LE(count_, kMaxNumCached);
|
||||
for (uptr i = 0; i < count; i++)
|
||||
batch_[i] = batch[i];
|
||||
}
|
||||
@ -71,9 +71,9 @@ class SizeClassAllocator32 {
|
||||
void Clear() { count_ = 0; }
|
||||
void Add(void *ptr) {
|
||||
batch_[count_++] = ptr;
|
||||
CHECK_LE(count_, kMaxNumCached);
|
||||
DCHECK_LE(count_, kMaxNumCached);
|
||||
}
|
||||
void CopyToArray(void *to_batch[]) {
|
||||
void CopyToArray(void *to_batch[]) const {
|
||||
for (uptr i = 0, n = Count(); i < n; i++)
|
||||
to_batch[i] = batch_[i];
|
||||
}
|
||||
@ -82,8 +82,8 @@ class SizeClassAllocator32 {
|
||||
static uptr AllocationSizeRequiredForNElements(uptr n) {
|
||||
return sizeof(uptr) * 2 + sizeof(void *) * n;
|
||||
}
|
||||
static uptr MaxCached(uptr class_id) {
|
||||
return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
|
||||
static uptr MaxCached(uptr size) {
|
||||
return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
|
||||
}
|
||||
|
||||
TransferBatch *next;
|
||||
@ -106,7 +106,7 @@ class SizeClassAllocator32 {
|
||||
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
|
||||
|
||||
void Init(s32 release_to_os_interval_ms) {
|
||||
possible_regions.TestOnlyInit();
|
||||
possible_regions.Init();
|
||||
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
|
||||
}
|
||||
|
||||
@ -118,8 +118,12 @@ class SizeClassAllocator32 {
|
||||
// This is empty here. Currently only implemented in 64-bit allocator.
|
||||
}
|
||||
|
||||
void ForceReleaseToOS() {
|
||||
// Currently implemented in 64-bit allocator only.
|
||||
}
|
||||
|
||||
void *MapWithCallback(uptr size) {
|
||||
void *res = MmapOrDie(size, "SizeClassAllocator32");
|
||||
void *res = MmapOrDie(size, PrimaryAllocatorName);
|
||||
MapUnmapCallback().OnMap((uptr)res, size);
|
||||
return res;
|
||||
}
|
||||
@ -147,13 +151,14 @@ class SizeClassAllocator32 {
|
||||
|
||||
NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
|
||||
uptr class_id) {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
SizeClassInfo *sci = GetSizeClassInfo(class_id);
|
||||
SpinMutexLock l(&sci->mutex);
|
||||
if (sci->free_list.empty() &&
|
||||
UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
|
||||
return nullptr;
|
||||
CHECK(!sci->free_list.empty());
|
||||
if (sci->free_list.empty()) {
|
||||
if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
|
||||
return nullptr;
|
||||
DCHECK(!sci->free_list.empty());
|
||||
}
|
||||
TransferBatch *b = sci->free_list.front();
|
||||
sci->free_list.pop_front();
|
||||
return b;
|
||||
@ -161,15 +166,13 @@ class SizeClassAllocator32 {
|
||||
|
||||
NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
|
||||
TransferBatch *b) {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
CHECK_GT(b->Count(), 0);
|
||||
SizeClassInfo *sci = GetSizeClassInfo(class_id);
|
||||
SpinMutexLock l(&sci->mutex);
|
||||
sci->free_list.push_front(b);
|
||||
}
|
||||
|
||||
uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
|
||||
|
||||
bool PointerIsMine(const void *p) {
|
||||
uptr mem = reinterpret_cast<uptr>(p);
|
||||
if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
|
||||
@ -245,12 +248,9 @@ class SizeClassAllocator32 {
|
||||
}
|
||||
}
|
||||
|
||||
void PrintStats() {
|
||||
}
|
||||
void PrintStats() {}
|
||||
|
||||
static uptr AdditionalSize() {
|
||||
return 0;
|
||||
}
|
||||
static uptr AdditionalSize() { return 0; }
|
||||
|
||||
typedef SizeClassMap SizeClassMapT;
|
||||
static const uptr kNumClasses = SizeClassMap::kNumClasses;
|
||||
@ -259,16 +259,15 @@ class SizeClassAllocator32 {
|
||||
static const uptr kRegionSize = 1 << kRegionSizeLog;
|
||||
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
|
||||
|
||||
struct SizeClassInfo {
|
||||
SpinMutex mutex;
|
||||
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
|
||||
StaticSpinMutex mutex;
|
||||
IntrusiveList<TransferBatch> free_list;
|
||||
char padding[kCacheLineSize - sizeof(uptr) -
|
||||
sizeof(IntrusiveList<TransferBatch>)];
|
||||
u32 rand_state;
|
||||
};
|
||||
COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
|
||||
COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
|
||||
|
||||
uptr ComputeRegionId(uptr mem) {
|
||||
uptr res = mem >> kRegionSizeLog;
|
||||
const uptr res = mem >> kRegionSizeLog;
|
||||
CHECK_LT(res, kNumPossibleRegions);
|
||||
return res;
|
||||
}
|
||||
@ -278,9 +277,9 @@ class SizeClassAllocator32 {
|
||||
}
|
||||
|
||||
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
|
||||
kRegionSize, kRegionSize, "SizeClassAllocator32"));
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
|
||||
kRegionSize, kRegionSize, PrimaryAllocatorName));
|
||||
if (UNLIKELY(!res))
|
||||
return 0;
|
||||
MapUnmapCallback().OnMap(res, kRegionSize);
|
||||
@ -291,33 +290,66 @@ class SizeClassAllocator32 {
|
||||
}
|
||||
|
||||
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
return &size_class_info_array[class_id];
|
||||
}
|
||||
|
||||
bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
|
||||
SizeClassInfo *sci, uptr class_id) {
|
||||
uptr size = ClassIdToSize(class_id);
|
||||
uptr reg = AllocateRegion(stat, class_id);
|
||||
if (UNLIKELY(!reg))
|
||||
return false;
|
||||
uptr n_chunks = kRegionSize / (size + kMetadataSize);
|
||||
uptr max_count = TransferBatch::MaxCached(class_id);
|
||||
CHECK_GT(max_count, 0);
|
||||
TransferBatch *b = nullptr;
|
||||
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
|
||||
bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
|
||||
TransferBatch **current_batch, uptr max_count,
|
||||
uptr *pointers_array, uptr count) {
|
||||
// If using a separate class for batches, we do not need to shuffle it.
|
||||
if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
|
||||
class_id != SizeClassMap::kBatchClassID))
|
||||
RandomShuffle(pointers_array, count, &sci->rand_state);
|
||||
TransferBatch *b = *current_batch;
|
||||
for (uptr i = 0; i < count; i++) {
|
||||
if (!b) {
|
||||
b = c->CreateBatch(class_id, this, (TransferBatch*)i);
|
||||
b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
|
||||
if (UNLIKELY(!b))
|
||||
return false;
|
||||
b->Clear();
|
||||
}
|
||||
b->Add((void*)i);
|
||||
b->Add((void*)pointers_array[i]);
|
||||
if (b->Count() == max_count) {
|
||||
sci->free_list.push_back(b);
|
||||
b = nullptr;
|
||||
}
|
||||
}
|
||||
*current_batch = b;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
|
||||
SizeClassInfo *sci, uptr class_id) {
|
||||
const uptr region = AllocateRegion(stat, class_id);
|
||||
if (UNLIKELY(!region))
|
||||
return false;
|
||||
if (kRandomShuffleChunks)
|
||||
if (UNLIKELY(sci->rand_state == 0))
|
||||
// The random state is initialized from ASLR (PIE) and time.
|
||||
sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
|
||||
const uptr size = ClassIdToSize(class_id);
|
||||
const uptr n_chunks = kRegionSize / (size + kMetadataSize);
|
||||
const uptr max_count = TransferBatch::MaxCached(size);
|
||||
DCHECK_GT(max_count, 0);
|
||||
TransferBatch *b = nullptr;
|
||||
constexpr uptr kShuffleArraySize = 48;
|
||||
uptr shuffle_array[kShuffleArraySize];
|
||||
uptr count = 0;
|
||||
for (uptr i = region; i < region + n_chunks * size; i += size) {
|
||||
shuffle_array[count++] = i;
|
||||
if (count == kShuffleArraySize) {
|
||||
if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
|
||||
shuffle_array, count)))
|
||||
return false;
|
||||
count = 0;
|
||||
}
|
||||
}
|
||||
if (count) {
|
||||
if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
|
||||
shuffle_array, count)))
|
||||
return false;
|
||||
}
|
||||
if (b) {
|
||||
CHECK_GT(b->Count(), 0);
|
||||
sci->free_list.push_back(b);
|
||||
|
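The rewritten PopulateFreeList above carves a freshly mapped region into chunks, buffers the addresses in a 48-entry array, shuffles each full slice, and hands it to PopulateBatches. A minimal standalone sketch of that carve-shuffle-batch pattern follows; Batch and carve_region are illustrative names, not the sanitizer's real types, and the LCG mirrors the Rand helper visible elsewhere in this diff.

// Sketch only: buffer chunk addresses, shuffle each slice, flush into batches.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct Batch {                              // stand-in for TransferBatch
  std::vector<std::uintptr_t> chunks;
};

static std::uint32_t Rand(std::uint32_t *state) {   // ANSI C LCG, as in the removed code
  return (*state = *state * 1103515245 + 12345) >> 16;
}

static void Shuffle(std::uintptr_t *a, std::size_t n, std::uint32_t *rand_state) {
  for (std::size_t i = n - 1; i > 0; i--)            // Fisher-Yates
    std::swap(a[i], a[Rand(rand_state) % (i + 1)]);
}

// Carve [region, region + n_chunks * size) into chunks, shuffling 48 at a time.
std::vector<Batch> carve_region(std::uintptr_t region, std::size_t size,
                                std::size_t n_chunks, std::size_t max_count,
                                std::uint32_t rand_state) {
  constexpr std::size_t kShuffleArraySize = 48;
  std::uintptr_t shuffle_array[kShuffleArraySize];
  std::size_t count = 0;
  std::vector<Batch> out(1);
  auto flush = [&](std::size_t n) {
    Shuffle(shuffle_array, n, &rand_state);
    for (std::size_t i = 0; i < n; i++) {
      out.back().chunks.push_back(shuffle_array[i]);
      if (out.back().chunks.size() == max_count)     // batch full, start a new one
        out.emplace_back();
    }
  };
  for (std::uintptr_t p = region; p < region + n_chunks * size; p += size) {
    shuffle_array[count++] = p;
    if (count == kShuffleArraySize) { flush(count); count = 0; }
  }
  if (count) flush(count);                           // flush the final partial slice
  return out;                                        // the real code only queues non-empty batches
}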
@ -70,15 +70,17 @@ class SizeClassAllocator64 {
|
||||
void Init(s32 release_to_os_interval_ms) {
|
||||
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
|
||||
if (kUsingConstantSpaceBeg) {
|
||||
CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
|
||||
MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
|
||||
CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
|
||||
PrimaryAllocatorName, kSpaceBeg));
|
||||
} else {
|
||||
NonConstSpaceBeg =
|
||||
reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
|
||||
NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
|
||||
PrimaryAllocatorName);
|
||||
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
|
||||
}
|
||||
SetReleaseToOSIntervalMs(release_to_os_interval_ms);
|
||||
MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
|
||||
// Check that the RegionInfo array is aligned on the CacheLine size.
|
||||
DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
|
||||
}
|
||||
|
||||
s32 ReleaseToOSIntervalMs() const {
|
||||
@ -90,6 +92,13 @@ class SizeClassAllocator64 {
|
||||
memory_order_relaxed);
|
||||
}
|
||||
|
||||
void ForceReleaseToOS() {
|
||||
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
|
||||
BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
|
||||
MaybeReleaseToOS(class_id, true /*force*/);
|
||||
}
|
||||
}
|
||||
|
||||
static bool CanAllocate(uptr size, uptr alignment) {
|
||||
return size <= SizeClassMap::kMaxSize &&
|
||||
alignment <= SizeClassMap::kMaxSize;
|
||||
@ -107,14 +116,18 @@ class SizeClassAllocator64 {
|
||||
// Failure to allocate free array space while releasing memory is non
|
||||
// recoverable.
|
||||
if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
|
||||
new_num_freed_chunks)))
|
||||
DieOnFailure::OnOOM();
|
||||
new_num_freed_chunks))) {
|
||||
Report("FATAL: Internal error: %s's allocator exhausted the free list "
|
||||
"space for size class %zd (%zd bytes).\n", SanitizerToolName,
|
||||
class_id, ClassIdToSize(class_id));
|
||||
Die();
|
||||
}
|
||||
for (uptr i = 0; i < n_chunks; i++)
|
||||
free_array[old_num_chunks + i] = chunks[i];
|
||||
region->num_freed_chunks = new_num_freed_chunks;
|
||||
region->stats.n_freed += n_chunks;
|
||||
|
||||
MaybeReleaseToOS(class_id);
|
||||
MaybeReleaseToOS(class_id, false /*force*/);
|
||||
}
|
||||
|
||||
NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
|
||||
@ -232,22 +245,28 @@ class SizeClassAllocator64 {
|
||||
}
|
||||
|
||||
void PrintStats() {
|
||||
uptr total_mapped = 0;
|
||||
uptr n_allocated = 0;
|
||||
uptr n_freed = 0;
|
||||
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
|
||||
RegionInfo *region = GetRegionInfo(class_id);
|
||||
total_mapped += region->mapped_user;
|
||||
n_allocated += region->stats.n_allocated;
|
||||
n_freed += region->stats.n_freed;
|
||||
}
|
||||
Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
|
||||
"remains %zd\n",
|
||||
total_mapped >> 20, n_allocated, n_allocated - n_freed);
|
||||
uptr rss_stats[kNumClasses];
|
||||
for (uptr class_id = 0; class_id < kNumClasses; class_id++)
|
||||
rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
|
||||
GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
|
||||
|
||||
uptr total_mapped = 0;
|
||||
uptr total_rss = 0;
|
||||
uptr n_allocated = 0;
|
||||
uptr n_freed = 0;
|
||||
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
|
||||
RegionInfo *region = GetRegionInfo(class_id);
|
||||
if (region->mapped_user != 0) {
|
||||
total_mapped += region->mapped_user;
|
||||
total_rss += rss_stats[class_id];
|
||||
}
|
||||
n_allocated += region->stats.n_allocated;
|
||||
n_freed += region->stats.n_freed;
|
||||
}
|
||||
|
||||
Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
|
||||
"%zd allocations; remains %zd\n", total_mapped >> 20,
|
||||
total_rss >> 20, n_allocated, n_allocated - n_freed);
|
||||
for (uptr class_id = 1; class_id < kNumClasses; class_id++)
|
||||
PrintStats(class_id, rss_stats[class_id]);
|
||||
}
|
||||
@ -529,6 +548,8 @@ class SizeClassAllocator64 {
|
||||
private:
|
||||
friend class MemoryMapper;
|
||||
|
||||
ReservedAddressRange address_range;
|
||||
|
||||
static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
|
||||
// FreeArray is the array of free-d chunks (stored as 4-byte offsets).
|
||||
// In the worst case it may require kRegionSize/SizeClassMap::kMinSize
|
||||
@ -567,7 +588,7 @@ class SizeClassAllocator64 {
|
||||
u64 last_released_bytes;
|
||||
};
|
||||
|
||||
struct RegionInfo {
|
||||
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
|
||||
BlockingMutex mutex;
|
||||
uptr num_freed_chunks; // Number of elements in the freearray.
|
||||
uptr mapped_free_array; // Bytes mapped for freearray.
|
||||
@ -580,24 +601,11 @@ class SizeClassAllocator64 {
|
||||
Stats stats;
|
||||
ReleaseToOsInfo rtoi;
|
||||
};
|
||||
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
|
||||
|
||||
u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
|
||||
return (*state = *state * 1103515245 + 12345) >> 16;
|
||||
}
|
||||
|
||||
u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
|
||||
|
||||
void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
|
||||
if (n <= 1) return;
|
||||
for (u32 i = n - 1; i > 0; i--)
|
||||
Swap(a[i], a[RandN(rand_state, i + 1)]);
|
||||
}
|
||||
COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
|
||||
|
||||
RegionInfo *GetRegionInfo(uptr class_id) const {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
RegionInfo *regions =
|
||||
reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
|
||||
return ®ions[class_id];
|
||||
}
|
||||
|
||||
@ -622,7 +630,7 @@ class SizeClassAllocator64 {
|
||||
}
|
||||
|
||||
bool MapWithCallback(uptr beg, uptr size) {
|
||||
uptr mapped = reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(beg, size));
|
||||
uptr mapped = address_range.Map(beg, size);
|
||||
if (UNLIKELY(!mapped))
|
||||
return false;
|
||||
CHECK_EQ(beg, mapped);
|
||||
@ -631,13 +639,13 @@ class SizeClassAllocator64 {
|
||||
}
|
||||
|
||||
void MapWithCallbackOrDie(uptr beg, uptr size) {
|
||||
CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
|
||||
CHECK_EQ(beg, address_range.MapOrDie(beg, size));
|
||||
MapUnmapCallback().OnMap(beg, size);
|
||||
}
|
||||
|
||||
void UnmapWithCallbackOrDie(uptr beg, uptr size) {
|
||||
MapUnmapCallback().OnUnmap(beg, size);
|
||||
UnmapOrDie(reinterpret_cast<void *>(beg), size);
|
||||
address_range.Unmap(beg, size);
|
||||
}
|
||||
|
||||
bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
|
||||
@ -656,55 +664,74 @@ class SizeClassAllocator64 {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check whether this size class is exhausted.
|
||||
bool IsRegionExhausted(RegionInfo *region, uptr class_id,
|
||||
uptr additional_map_size) {
|
||||
if (LIKELY(region->mapped_user + region->mapped_meta +
|
||||
additional_map_size <= kRegionSize - kFreeArraySize))
|
||||
return false;
|
||||
if (!region->exhausted) {
|
||||
region->exhausted = true;
|
||||
Printf("%s: Out of memory. ", SanitizerToolName);
|
||||
Printf("The process has exhausted %zuMB for size class %zu.\n",
|
||||
kRegionSize >> 20, ClassIdToSize(class_id));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
|
||||
RegionInfo *region, uptr requested_count) {
|
||||
// region->mutex is held.
|
||||
const uptr size = ClassIdToSize(class_id);
|
||||
const uptr new_space_beg = region->allocated_user;
|
||||
const uptr new_space_end = new_space_beg + requested_count * size;
|
||||
const uptr region_beg = GetRegionBeginBySizeClass(class_id);
|
||||
const uptr size = ClassIdToSize(class_id);
|
||||
|
||||
const uptr total_user_bytes =
|
||||
region->allocated_user + requested_count * size;
|
||||
// Map more space for chunks, if necessary.
|
||||
if (new_space_end > region->mapped_user) {
|
||||
if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
|
||||
region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR.
|
||||
// Do the mmap for the user memory.
|
||||
uptr map_size = kUserMapSize;
|
||||
while (new_space_end > region->mapped_user + map_size)
|
||||
map_size += kUserMapSize;
|
||||
CHECK_GE(region->mapped_user + map_size, new_space_end);
|
||||
if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
|
||||
map_size)))
|
||||
return false;
|
||||
stat->Add(AllocatorStatMapped, map_size);
|
||||
region->mapped_user += map_size;
|
||||
}
|
||||
const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size;
|
||||
|
||||
// Calculate the required space for metadata.
|
||||
const uptr requested_allocated_meta =
|
||||
region->allocated_meta + new_chunks_count * kMetadataSize;
|
||||
uptr requested_mapped_meta = region->mapped_meta;
|
||||
while (requested_allocated_meta > requested_mapped_meta)
|
||||
requested_mapped_meta += kMetaMapSize;
|
||||
// Check whether this size class is exhausted.
|
||||
if (region->mapped_user + requested_mapped_meta >
|
||||
kRegionSize - kFreeArraySize) {
|
||||
if (!region->exhausted) {
|
||||
region->exhausted = true;
|
||||
Printf("%s: Out of memory. ", SanitizerToolName);
|
||||
Printf("The process has exhausted %zuMB for size class %zu.\n",
|
||||
kRegionSize >> 20, size);
|
||||
if (LIKELY(total_user_bytes > region->mapped_user)) {
|
||||
if (UNLIKELY(region->mapped_user == 0)) {
|
||||
if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
|
||||
// The random state is initialized from ASLR.
|
||||
region->rand_state = static_cast<u32>(region_beg >> 12);
|
||||
// Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
|
||||
// preventing just allocated memory from being released sooner than
|
||||
// necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
|
||||
// for short lived processes.
|
||||
// Do it only when the feature is turned on, to avoid a potentially
|
||||
// extraneous syscall.
|
||||
if (ReleaseToOSIntervalMs() >= 0)
|
||||
region->rtoi.last_release_at_ns = MonotonicNanoTime();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// Map more space for metadata, if necessary.
|
||||
if (requested_mapped_meta > region->mapped_meta) {
|
||||
if (UNLIKELY(!MapWithCallback(
|
||||
GetMetadataEnd(region_beg) - requested_mapped_meta,
|
||||
requested_mapped_meta - region->mapped_meta)))
|
||||
// Do the mmap for the user memory.
|
||||
const uptr user_map_size =
|
||||
RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
|
||||
if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
|
||||
return false;
|
||||
region->mapped_meta = requested_mapped_meta;
|
||||
if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
|
||||
user_map_size)))
|
||||
return false;
|
||||
stat->Add(AllocatorStatMapped, user_map_size);
|
||||
region->mapped_user += user_map_size;
|
||||
}
|
||||
const uptr new_chunks_count =
|
||||
(region->mapped_user - region->allocated_user) / size;
|
||||
|
||||
if (kMetadataSize) {
|
||||
// Calculate the required space for metadata.
|
||||
const uptr total_meta_bytes =
|
||||
region->allocated_meta + new_chunks_count * kMetadataSize;
|
||||
const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
|
||||
RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
|
||||
// Map more space for metadata, if necessary.
|
||||
if (meta_map_size) {
|
||||
if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
|
||||
return false;
|
||||
if (UNLIKELY(!MapWithCallback(
|
||||
GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
|
||||
meta_map_size)))
|
||||
return false;
|
||||
region->mapped_meta += meta_map_size;
|
||||
}
|
||||
}
|
||||
|
||||
// If necessary, allocate more space for the free array and populate it with
|
||||
@ -713,7 +740,7 @@ class SizeClassAllocator64 {
|
||||
if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
|
||||
return false;
|
||||
CompactPtrT *free_array = GetFreeArray(region_beg);
|
||||
for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count;
|
||||
for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
|
||||
i++, chunk += size)
|
||||
free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
|
||||
if (kRandomShuffleChunks)
|
||||
@ -725,7 +752,7 @@ class SizeClassAllocator64 {
|
||||
region->num_freed_chunks += new_chunks_count;
|
||||
region->allocated_user += new_chunks_count * size;
|
||||
CHECK_LE(region->allocated_user, region->mapped_user);
|
||||
region->allocated_meta = requested_allocated_meta;
|
||||
region->allocated_meta += new_chunks_count * kMetadataSize;
|
||||
CHECK_LE(region->allocated_meta, region->mapped_meta);
|
||||
region->exhausted = false;
|
||||
|
||||
@ -784,7 +811,7 @@ class SizeClassAllocator64 {
|
||||
|
||||
// Attempts to release RAM occupied by freed chunks back to OS. The region is
|
||||
// expected to be locked.
|
||||
void MaybeReleaseToOS(uptr class_id) {
|
||||
void MaybeReleaseToOS(uptr class_id, bool force) {
|
||||
RegionInfo *region = GetRegionInfo(class_id);
|
||||
const uptr chunk_size = ClassIdToSize(class_id);
|
||||
const uptr page_size = GetPageSizeCached();
|
||||
@ -797,12 +824,16 @@ class SizeClassAllocator64 {
|
||||
return; // Nothing new to release.
|
||||
}
|
||||
|
||||
s32 interval_ms = ReleaseToOSIntervalMs();
|
||||
if (interval_ms < 0)
|
||||
return;
|
||||
if (!force) {
|
||||
s32 interval_ms = ReleaseToOSIntervalMs();
|
||||
if (interval_ms < 0)
|
||||
return;
|
||||
|
||||
if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
|
||||
return; // Memory was returned recently.
|
||||
if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
|
||||
MonotonicNanoTime()) {
|
||||
return; // Memory was returned recently.
|
||||
}
|
||||
}
|
||||
|
||||
MemoryMapper memory_mapper(*this, class_id);
|
||||
|
||||
@ -816,6 +847,6 @@ class SizeClassAllocator64 {
|
||||
region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
|
||||
region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
|
||||
}
|
||||
region->rtoi.last_release_at_ns = NanoTime();
|
||||
region->rtoi.last_release_at_ns = MonotonicNanoTime();
|
||||
}
|
||||
};
|
||||
|
libsanitizer/sanitizer_common/sanitizer_allocator_report.cc (new file, 123 lines)
@ -0,0 +1,123 @@
|
||||
//===-- sanitizer_allocator_report.cc ---------------------------*- C++ -*-===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
///
|
||||
/// \file
|
||||
/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
|
||||
///
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_allocator.h"
|
||||
#include "sanitizer_allocator_report.h"
|
||||
#include "sanitizer_common.h"
|
||||
#include "sanitizer_report_decorator.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
class ScopedAllocatorErrorReport {
|
||||
public:
|
||||
ScopedAllocatorErrorReport(const char *error_summary_,
|
||||
const StackTrace *stack_)
|
||||
: error_summary(error_summary_),
|
||||
stack(stack_) {
|
||||
Printf("%s", d.Error());
|
||||
}
|
||||
~ScopedAllocatorErrorReport() {
|
||||
Printf("%s", d.Default());
|
||||
stack->Print();
|
||||
PrintHintAllocatorCannotReturnNull();
|
||||
ReportErrorSummary(error_summary, stack);
|
||||
}
|
||||
|
||||
private:
|
||||
ScopedErrorReportLock lock;
|
||||
const char *error_summary;
|
||||
const StackTrace* const stack;
|
||||
const SanitizerCommonDecorator d;
|
||||
};
|
||||
|
||||
void NORETURN ReportCallocOverflow(uptr count, uptr size,
|
||||
const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("calloc-overflow", stack);
|
||||
Report("ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) "
|
||||
"cannot be represented in type size_t\n", SanitizerToolName, count,
|
||||
size);
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("pvalloc-overflow", stack);
|
||||
Report("ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to "
|
||||
"system page size 0x%zx cannot be represented in type size_t\n",
|
||||
SanitizerToolName, size, GetPageSizeCached());
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
|
||||
const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("invalid-allocation-alignment", stack);
|
||||
Report("ERROR: %s: invalid allocation alignment: %zd, alignment must be a "
|
||||
"power of two\n", SanitizerToolName, alignment);
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
|
||||
const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("invalid-aligned-alloc-alignment", stack);
|
||||
#if SANITIZER_POSIX
|
||||
Report("ERROR: %s: invalid alignment requested in "
|
||||
"aligned_alloc: %zd, alignment must be a power of two and the "
|
||||
"requested size 0x%zx must be a multiple of alignment\n",
|
||||
SanitizerToolName, alignment, size);
|
||||
#else
|
||||
Report("ERROR: %s: invalid alignment requested in aligned_alloc: %zd, "
|
||||
"the requested size 0x%zx must be a multiple of alignment\n",
|
||||
SanitizerToolName, alignment, size);
|
||||
#endif
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
|
||||
const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("invalid-posix-memalign-alignment",
|
||||
stack);
|
||||
Report("ERROR: %s: invalid alignment requested in "
|
||||
"posix_memalign: %zd, alignment must be a power of two and a "
|
||||
"multiple of sizeof(void*) == %zd\n", SanitizerToolName, alignment,
|
||||
sizeof(void*)); // NOLINT
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
|
||||
const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("allocation-size-too-big", stack);
|
||||
Report("ERROR: %s: requested allocation size 0x%zx exceeds maximum "
|
||||
"supported size of 0x%zx\n", SanitizerToolName, user_size, max_size);
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
|
||||
{
|
||||
ScopedAllocatorErrorReport report("out-of-memory", stack);
|
||||
Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
|
||||
"bytes\n", SanitizerToolName, requested_size);
|
||||
}
|
||||
Die();
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
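The helpers in this new file are intended to be the single exit path an allocator takes once it has decided not to return null. A hedged sketch of a caller follows: ExampleAllocate and kExampleMaxSize are invented for illustration, while the Report* functions, InternalAlloc and BufferedStackTrace are the real interfaces shown in this diff.

// Illustrative caller of the shared reporting API; not part of the patch.
#include "sanitizer_allocator_internal.h"
#include "sanitizer_allocator_report.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

static const uptr kExampleMaxSize = 1ULL << 30;  // assumed per-tool limit

void *ExampleAllocate(uptr size, bool may_return_null,
                      BufferedStackTrace *stack) {
  if (UNLIKELY(size > kExampleMaxSize)) {
    if (may_return_null)
      return nullptr;                            // caller tolerates failure
    ReportAllocationSizeTooBig(size, kExampleMaxSize, stack);  // never returns
  }
  void *p = InternalAlloc(size);
  if (UNLIKELY(!p)) {
    if (may_return_null)
      return nullptr;
    ReportOutOfMemory(size, stack);              // prints the report, then Die()s
  }
  return p;
}

}  // namespace __sanitizer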
libsanitizer/sanitizer_common/sanitizer_allocator_report.h (new file, 36 lines)
@ -0,0 +1,36 @@
//===-- sanitizer_allocator_report.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
///
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_REPORT_H
#define SANITIZER_ALLOCATOR_REPORT_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

void NORETURN ReportCallocOverflow(uptr count, uptr size,
                                   const StackTrace *stack);
void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack);
void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
                                               const StackTrace *stack);
void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                                 const StackTrace *stack);
void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
                                                  const StackTrace *stack);
void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
                                         const StackTrace *stack);
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_REPORT_H
@ -12,17 +12,66 @@
|
||||
#error This file must be included inside sanitizer_allocator.h
|
||||
#endif
|
||||
|
||||
// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
|
||||
// allocated chunks. To be used in memory constrained or not memory hungry cases
|
||||
// (currently, 32 bits and internal allocator).
|
||||
class LargeMmapAllocatorPtrArrayStatic {
|
||||
public:
|
||||
INLINE void *Init() { return &p_[0]; }
|
||||
INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
|
||||
private:
|
||||
static const int kMaxNumChunks = 1 << 15;
|
||||
uptr p_[kMaxNumChunks];
|
||||
};
|
||||
|
||||
// Much less restricted LargeMmapAllocator chunks list (comparing to
|
||||
// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
|
||||
// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
|
||||
// same functionality in the Fuchsia case, which does not support MAP_NORESERVE.
|
||||
class LargeMmapAllocatorPtrArrayDynamic {
|
||||
public:
|
||||
INLINE void *Init() {
|
||||
uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
|
||||
SecondaryAllocatorName);
|
||||
CHECK(p);
|
||||
return reinterpret_cast<void*>(p);
|
||||
}
|
||||
|
||||
INLINE void EnsureSpace(uptr n) {
|
||||
CHECK_LT(n, kMaxNumChunks);
|
||||
DCHECK(n <= n_reserved_);
|
||||
if (UNLIKELY(n == n_reserved_)) {
|
||||
address_range_.MapOrDie(
|
||||
reinterpret_cast<uptr>(address_range_.base()) +
|
||||
n_reserved_ * sizeof(uptr),
|
||||
kChunksBlockCount * sizeof(uptr));
|
||||
n_reserved_ += kChunksBlockCount;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static const int kMaxNumChunks = 1 << 20;
|
||||
static const int kChunksBlockCount = 1 << 14;
|
||||
ReservedAddressRange address_range_;
|
||||
uptr n_reserved_;
|
||||
};
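EnsureSpace above commits the reserved pointer array one 16K-entry block at a time, so the roughly 8 MB of address space it reserves on 64-bit targets costs nothing until it is actually used. A standalone sketch of the same reserve-then-commit pattern against plain POSIX mmap/mprotect follows; the real class goes through ReservedAddressRange instead so that it also works on Fuchsia, and GrowablePtrArray is an illustrative name.

// Sketch only (POSIX, C++17): reserve the whole range, commit blocks lazily.
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

class GrowablePtrArray {
 public:
  static constexpr std::size_t kMaxNumChunks = 1 << 20;     // 1M entries
  static constexpr std::size_t kChunksBlockCount = 1 << 14; // commit 16K entries at a time

  // Reserve the whole address range up front, but commit nothing yet.
  void Init() {
    base_ = mmap(nullptr, kMaxNumChunks * sizeof(std::uintptr_t), PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    assert(base_ != MAP_FAILED);
    n_reserved_ = 0;
  }

  // Make sure slot n is backed by committed memory before it is written.
  void EnsureSpace(std::size_t n) {
    assert(n < kMaxNumChunks);
    if (n < n_reserved_) return;
    char *commit_at = static_cast<char *>(base_) +
                      n_reserved_ * sizeof(std::uintptr_t);
    int rc = mprotect(commit_at, kChunksBlockCount * sizeof(std::uintptr_t),
                      PROT_READ | PROT_WRITE);   // commit one more block
    assert(rc == 0);
    (void)rc;
    n_reserved_ += kChunksBlockCount;
  }

  std::uintptr_t *data() { return static_cast<std::uintptr_t *>(base_); }

 private:
  void *base_ = nullptr;
  std::size_t n_reserved_ = 0;
};

Growing lazily keeps resident memory proportional to the number of live secondary chunks rather than to the 1M-entry capacity.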
|
||||
|
||||
#if SANITIZER_WORDSIZE == 32
|
||||
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
|
||||
#else
|
||||
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
|
||||
#endif
|
||||
|
||||
// This class can (de)allocate only large chunks of memory using mmap/unmap.
|
||||
// The main purpose of this allocator is to cover large and rare allocation
|
||||
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
|
||||
template <class MapUnmapCallback = NoOpMapUnmapCallback,
|
||||
class FailureHandlerT = ReturnNullOrDieOnFailure>
|
||||
class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
|
||||
class LargeMmapAllocator {
|
||||
public:
|
||||
typedef FailureHandlerT FailureHandler;
|
||||
|
||||
void InitLinkerInitialized() {
|
||||
page_size_ = GetPageSizeCached();
|
||||
chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
|
||||
}
|
||||
|
||||
void Init() {
|
||||
@ -36,12 +85,16 @@ class LargeMmapAllocator {
|
||||
if (alignment > page_size_)
|
||||
map_size += alignment;
|
||||
// Overflow.
|
||||
if (map_size < size)
|
||||
return FailureHandler::OnBadRequest();
|
||||
if (map_size < size) {
|
||||
Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
|
||||
"0x%zx bytes with 0x%zx alignment requested\n",
|
||||
SanitizerToolName, map_size, alignment);
|
||||
return nullptr;
|
||||
}
|
||||
uptr map_beg = reinterpret_cast<uptr>(
|
||||
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
|
||||
MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
|
||||
if (!map_beg)
|
||||
return FailureHandler::OnOOM();
|
||||
return nullptr;
|
||||
CHECK(IsAligned(map_beg, page_size_));
|
||||
MapUnmapCallback().OnMap(map_beg, map_size);
|
||||
uptr map_end = map_beg + map_size;
|
||||
@ -60,11 +113,11 @@ class LargeMmapAllocator {
|
||||
CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
|
||||
{
|
||||
SpinMutexLock l(&mutex_);
|
||||
ptr_array_.EnsureSpace(n_chunks_);
|
||||
uptr idx = n_chunks_++;
|
||||
chunks_sorted_ = false;
|
||||
CHECK_LT(idx, kMaxNumChunks);
|
||||
h->chunk_idx = idx;
|
||||
chunks_[idx] = h;
|
||||
chunks_sorted_ = false;
|
||||
stats.n_allocs++;
|
||||
stats.currently_allocated += map_size;
|
||||
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
|
||||
@ -82,9 +135,8 @@ class LargeMmapAllocator {
|
||||
uptr idx = h->chunk_idx;
|
||||
CHECK_EQ(chunks_[idx], h);
|
||||
CHECK_LT(idx, n_chunks_);
|
||||
chunks_[idx] = chunks_[n_chunks_ - 1];
|
||||
chunks_[idx] = chunks_[--n_chunks_];
|
||||
chunks_[idx]->chunk_idx = idx;
|
||||
n_chunks_--;
|
||||
chunks_sorted_ = false;
|
||||
stats.n_frees++;
|
||||
stats.currently_allocated -= h->map_size;
|
||||
@ -148,7 +200,7 @@ class LargeMmapAllocator {
|
||||
|
||||
void EnsureSortedChunks() {
|
||||
if (chunks_sorted_) return;
|
||||
SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
|
||||
Sort(reinterpret_cast<uptr *>(chunks_), n_chunks_);
|
||||
for (uptr i = 0; i < n_chunks_; i++)
|
||||
chunks_[i]->chunk_idx = i;
|
||||
chunks_sorted_ = true;
|
||||
@ -220,7 +272,7 @@ class LargeMmapAllocator {
|
||||
EnsureSortedChunks(); // Avoid doing the sort while iterating.
|
||||
for (uptr i = 0; i < n_chunks_; i++) {
|
||||
auto t = chunks_[i];
|
||||
callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
|
||||
callback(reinterpret_cast<uptr>(GetUser(t)), arg);
|
||||
// Consistency check: verify that the array did not change.
|
||||
CHECK_EQ(chunks_[i], t);
|
||||
CHECK_EQ(chunks_[i]->chunk_idx, i);
|
||||
@ -228,7 +280,6 @@ class LargeMmapAllocator {
|
||||
}
|
||||
|
||||
private:
|
||||
static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
|
||||
struct Header {
|
||||
uptr map_beg;
|
||||
uptr map_size;
|
||||
@ -254,11 +305,12 @@ class LargeMmapAllocator {
|
||||
}
|
||||
|
||||
uptr page_size_;
|
||||
Header *chunks_[kMaxNumChunks];
|
||||
Header **chunks_;
|
||||
PtrArrayT ptr_array_;
|
||||
uptr n_chunks_;
|
||||
bool chunks_sorted_;
|
||||
struct Stats {
|
||||
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
|
||||
} stats;
|
||||
SpinMutex mutex_;
|
||||
StaticSpinMutex mutex_;
|
||||
};
|
||||
|
@ -159,23 +159,24 @@ class SizeClassMap {
|
||||
return 0;
|
||||
if (size <= kMidSize)
|
||||
return (size + kMinSize - 1) >> kMinSizeLog;
|
||||
uptr l = MostSignificantSetBitIndex(size);
|
||||
uptr hbits = (size >> (l - S)) & M;
|
||||
uptr lbits = size & ((1 << (l - S)) - 1);
|
||||
uptr l1 = l - kMidSizeLog;
|
||||
const uptr l = MostSignificantSetBitIndex(size);
|
||||
const uptr hbits = (size >> (l - S)) & M;
|
||||
const uptr lbits = size & ((1U << (l - S)) - 1);
|
||||
const uptr l1 = l - kMidSizeLog;
|
||||
return kMidClass + (l1 << S) + hbits + (lbits > 0);
|
||||
}
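For sizes above kMidSize the formula above encodes the size as a power-of-two bucket (l1) plus S extra high bits (hbits), rounding up whenever any low bits remain. A small self-contained check of that arithmetic follows; the constants are assumed from DefaultSizeClassMap (SizeClassMap<3, 4, 8, 17, 128, 16>) shown later in this diff, so treat them as an assumption rather than part of the patch.

// Standalone re-computation of the ClassID formula above, sketch only.
#include <cstdint>
#include <cstdio>

constexpr std::uint64_t kNumBits = 3, kMinSizeLog = 4, kMidSizeLog = 8;
constexpr std::uint64_t S = kNumBits, M = (1 << S) - 1;
constexpr std::uint64_t kMinSize = 1 << kMinSizeLog;
constexpr std::uint64_t kMidSize = 1 << kMidSizeLog;
constexpr std::uint64_t kMidClass = kMidSize / kMinSize;  // 16

std::uint64_t MostSignificantSetBitIndex(std::uint64_t x) {
  std::uint64_t l = 0;
  while (x >>= 1) l++;
  return l;
}

std::uint64_t ClassID(std::uint64_t size) {
  if (size == 0) return 0;
  if (size <= kMidSize)
    return (size + kMinSize - 1) >> kMinSizeLog;          // linear 16-byte classes
  const std::uint64_t l = MostSignificantSetBitIndex(size);
  const std::uint64_t hbits = (size >> (l - S)) & M;
  const std::uint64_t lbits = size & ((1ULL << (l - S)) - 1);
  const std::uint64_t l1 = l - kMidSizeLog;
  return kMidClass + (l1 << S) + hbits + (lbits > 0);
}

int main() {
  // size = 1000: l = 9, hbits = 7, lbits = 40 (round up), l1 = 1
  //   => 16 + 8 + 7 + 1 = 32
  std::printf("ClassID(1000) = %llu\n", (unsigned long long)ClassID(1000));  // 32
  std::printf("ClassID(256)  = %llu\n", (unsigned long long)ClassID(256));   // 16 == kMidClass
  return 0;
}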
|
||||
|
||||
static uptr MaxCachedHint(uptr class_id) {
|
||||
// Estimate the result for kBatchClassID because this class does not know
|
||||
// the exact size of TransferBatch. We need to cache fewer batches than user
|
||||
// chunks, so this number can be small.
|
||||
if (UNLIKELY(class_id == kBatchClassID))
|
||||
return 16;
|
||||
if (UNLIKELY(class_id == 0))
|
||||
static uptr MaxCachedHint(uptr size) {
|
||||
DCHECK_LE(size, kMaxSize);
|
||||
if (UNLIKELY(size == 0))
|
||||
return 0;
|
||||
uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
|
||||
return Max<uptr>(1, Min(kMaxNumCachedHint, n));
|
||||
uptr n;
|
||||
// Force a 32-bit division if the template parameters allow for it.
|
||||
if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
|
||||
n = (1UL << kMaxBytesCachedLog) / size;
|
||||
else
|
||||
n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
|
||||
return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
|
||||
}
|
||||
|
||||
static void Print() {
|
||||
@ -188,12 +189,12 @@ class SizeClassMap {
|
||||
uptr d = s - prev_s;
|
||||
uptr p = prev_s ? (d * 100 / prev_s) : 0;
|
||||
uptr l = s ? MostSignificantSetBitIndex(s) : 0;
|
||||
uptr cached = MaxCachedHint(i) * s;
|
||||
uptr cached = MaxCachedHint(s) * s;
|
||||
if (i == kBatchClassID)
|
||||
d = p = l = 0;
|
||||
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
|
||||
"cached: %zd %zd; id %zd\n",
|
||||
i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
|
||||
i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
|
||||
total_cached += cached;
|
||||
prev_s = s;
|
||||
}
|
||||
@ -229,3 +230,8 @@ class SizeClassMap {
|
||||
typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
|
||||
typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
|
||||
typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
|
||||
|
||||
// The following SizeClassMap only holds a very small number of cached entries,
// allowing for denser per-class arrays, a smaller memory footprint and usually
// better performance in threaded environments.
typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
|
||||
|
@ -99,5 +99,5 @@ class AllocatorGlobalStats : public AllocatorStats {
|
||||
}
|
||||
|
||||
private:
|
||||
mutable SpinMutex mu_;
|
||||
mutable StaticSpinMutex mu_;
|
||||
};
|
||||
|
@ -45,12 +45,12 @@
|
||||
# define ASM_HIDDEN(symbol) .hidden symbol
|
||||
# define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
|
||||
# define ASM_SIZE(symbol) .size symbol, .-symbol
|
||||
# define ASM_TSAN_SYMBOL(symbol) symbol
|
||||
# define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) symbol
|
||||
# define ASM_SYMBOL(symbol) symbol
|
||||
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
|
||||
#else
|
||||
# define ASM_HIDDEN(symbol)
|
||||
# define ASM_TYPE_FUNCTION(symbol)
|
||||
# define ASM_SIZE(symbol)
|
||||
# define ASM_TSAN_SYMBOL(symbol) _##symbol
|
||||
# define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
|
||||
# define ASM_SYMBOL(symbol) _##symbol
|
||||
# define ASM_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
|
||||
#endif
|
||||
|
@ -76,17 +76,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
|
||||
typedef typename T::Type Type;
|
||||
Type cmpv = *cmp;
|
||||
Type prev;
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
if (sizeof(*a) == 8) {
|
||||
Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
|
||||
prev = __mips_sync_val_compare_and_swap<u64>(
|
||||
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
|
||||
} else {
|
||||
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
|
||||
}
|
||||
#else
|
||||
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
|
||||
#endif
|
||||
if (prev == cmpv) return true;
|
||||
*cmp = prev;
|
||||
return false;
|
||||
@ -102,6 +92,13 @@ INLINE bool atomic_compare_exchange_weak(volatile T *a,
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8-byte atomics. It has to
// follow the template definitions above.
|
||||
#if defined(_MIPS_SIM) && defined(_ABIO32)
|
||||
#include "sanitizer_atomic_clang_mips.h"
|
||||
#endif
|
||||
|
||||
#undef ATOMIC_ORDER
|
||||
|
||||
#endif // SANITIZER_ATOMIC_CLANG_H
|
||||
|
libsanitizer/sanitizer_common/sanitizer_atomic_clang_mips.h (new file, 115 lines)
@ -0,0 +1,115 @@
|
||||
//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
|
||||
// Not intended for direct inclusion. Include sanitizer_atomic.h.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
|
||||
#define SANITIZER_ATOMIC_CLANG_MIPS_H
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// MIPS32 does not support atomics > 4 bytes. To address this lack of
|
||||
// functionality, the sanitizer library provides helper methods which use an
|
||||
// internal spin lock mechanism to emulate atomic operations when the size is
|
||||
// 8 bytes.
|
||||
static void __spin_lock(volatile int *lock) {
|
||||
while (__sync_lock_test_and_set(lock, 1))
|
||||
while (*lock) {
|
||||
}
|
||||
}
|
||||
|
||||
static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
|
||||
|
||||
// Make sure the lock is on its own cache line to prevent false sharing.
|
||||
// Put it inside a struct that is aligned and padded to the typical MIPS
|
||||
// cacheline which is 32 bytes.
|
||||
static struct {
|
||||
int lock;
|
||||
char pad[32 - sizeof(int)];
|
||||
} __attribute__((aligned(32))) lock = {0, {0}};
|
||||
|
||||
template <>
|
||||
INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
|
||||
atomic_uint64_t::Type val,
|
||||
memory_order mo) {
|
||||
DCHECK(mo &
|
||||
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
|
||||
DCHECK(!((uptr)ptr % sizeof(*ptr)));
|
||||
|
||||
atomic_uint64_t::Type ret;
|
||||
|
||||
__spin_lock(&lock.lock);
|
||||
ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
|
||||
ptr->val_dont_use = ret + val;
|
||||
__spin_unlock(&lock.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <>
|
||||
INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
|
||||
atomic_uint64_t::Type val,
|
||||
memory_order mo) {
|
||||
return atomic_fetch_add(ptr, -val, mo);
|
||||
}
|
||||
|
||||
template <>
|
||||
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
|
||||
atomic_uint64_t::Type *cmp,
|
||||
atomic_uint64_t::Type xchg,
|
||||
memory_order mo) {
|
||||
DCHECK(mo &
|
||||
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
|
||||
DCHECK(!((uptr)ptr % sizeof(*ptr)));
|
||||
|
||||
typedef atomic_uint64_t::Type Type;
|
||||
Type cmpv = *cmp;
|
||||
Type prev;
|
||||
bool ret = false;
|
||||
|
||||
__spin_lock(&lock.lock);
|
||||
prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
|
||||
if (prev == cmpv) {
|
||||
ret = true;
|
||||
ptr->val_dont_use = xchg;
|
||||
}
|
||||
__spin_unlock(&lock.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <>
|
||||
INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
|
||||
memory_order mo) {
|
||||
DCHECK(mo &
|
||||
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
|
||||
DCHECK(!((uptr)ptr % sizeof(*ptr)));
|
||||
|
||||
atomic_uint64_t::Type zero = 0;
|
||||
volatile atomic_uint64_t *Newptr =
|
||||
const_cast<volatile atomic_uint64_t *>(ptr);
|
||||
return atomic_fetch_add(Newptr, zero, mo);
|
||||
}
|
||||
|
||||
template <>
|
||||
INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
|
||||
memory_order mo) {
|
||||
DCHECK(mo &
|
||||
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
|
||||
DCHECK(!((uptr)ptr % sizeof(*ptr)));
|
||||
|
||||
__spin_lock(&lock.lock);
|
||||
ptr->val_dont_use = v;
|
||||
__spin_unlock(&lock.lock);
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // SANITIZER_ATOMIC_CLANG_MIPS_H
|
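The header above emulates 8-byte atomics on MIPS32 by funnelling every operation through one cache-line-aligned spin lock. A reduced, standalone sketch of that pattern follows, using the same GCC __sync builtins; it is generic C++ with invented names (emulated_fetch_add and friends), not the sanitizer's atomic_uint64_t template specializations.

// Sketch only: all 64-bit "atomic" operations serialize on one global lock.
#include <cstdint>

namespace {

struct alignas(32) SpinLock {        // keep the lock on its own (MIPS) cache line
  volatile int lock = 0;
  char pad[32 - sizeof(int)] = {};
};
SpinLock g_lock;

void spin_lock(volatile int *lock) {
  while (__sync_lock_test_and_set(lock, 1))  // acquire; returns the previous value
    while (*lock) {                          // spin on a plain read while contended
    }
}

void spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

}  // namespace

// fetch_add emulation: read-modify-write under the lock.
std::uint64_t emulated_fetch_add(volatile std::uint64_t *p, std::uint64_t v) {
  spin_lock(&g_lock.lock);
  std::uint64_t prev = *p;
  *p = prev + v;
  spin_unlock(&g_lock.lock);
  return prev;
}

// A plain load is just fetch_add of zero, exactly as atomic_load does above.
std::uint64_t emulated_load(volatile std::uint64_t *p) {
  return emulated_fetch_add(p, 0);
}

// compare-exchange emulation.
bool emulated_cas(volatile std::uint64_t *p, std::uint64_t *expected,
                  std::uint64_t desired) {
  bool ok = false;
  spin_lock(&g_lock.lock);
  std::uint64_t prev = *p;
  if (prev == *expected) {
    *p = desired;
    ok = true;
  } else {
    *expected = prev;
  }
  spin_unlock(&g_lock.lock);
  return ok;
}

Routing loads through the same lock as stores is what keeps a plain 64-bit read from observing a torn value on a 32-bit core.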
@ -15,55 +15,6 @@
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// MIPS32 does not support atomic > 4 bytes. To address this lack of
|
||||
// functionality, the sanitizer library provides helper methods which use an
|
||||
// internal spin lock mechanism to emulate atomic operations when the size is
|
||||
// 8 bytes.
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
static void __spin_lock(volatile int *lock) {
|
||||
while (__sync_lock_test_and_set(lock, 1))
|
||||
while (*lock) {
|
||||
}
|
||||
}
|
||||
|
||||
static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
|
||||
|
||||
|
||||
// Make sure the lock is on its own cache line to prevent false sharing.
|
||||
// Put it inside a struct that is aligned and padded to the typical MIPS
|
||||
// cacheline which is 32 bytes.
|
||||
static struct {
|
||||
int lock;
|
||||
char pad[32 - sizeof(int)];
|
||||
} __attribute__((aligned(32))) lock = {0, {0}};
|
||||
|
||||
template <class T>
|
||||
T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
|
||||
T ret;
|
||||
|
||||
__spin_lock(&lock.lock);
|
||||
|
||||
ret = *ptr;
|
||||
*ptr = ret + val;
|
||||
|
||||
__spin_unlock(&lock.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
|
||||
T ret;
|
||||
__spin_lock(&lock.lock);
|
||||
|
||||
ret = *ptr;
|
||||
if (ret == oldval) *ptr = newval;
|
||||
|
||||
__spin_unlock(&lock.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
INLINE void proc_yield(int cnt) {
|
||||
__asm__ __volatile__("" ::: "memory");
|
||||
@ -101,15 +52,8 @@ INLINE typename T::Type atomic_load(
|
||||
// 64-bit load on 32-bit platform.
|
||||
// Gross, but simple and reliable.
|
||||
// Assume that it is not in read-only memory.
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
typename T::Type volatile *val_ptr =
|
||||
const_cast<typename T::Type volatile *>(&a->val_dont_use);
|
||||
v = __mips_sync_fetch_and_add<u64>(
|
||||
reinterpret_cast<u64 volatile *>(val_ptr), 0);
|
||||
#else
|
||||
v = __sync_fetch_and_add(
|
||||
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
|
||||
#endif
|
||||
}
|
||||
return v;
|
||||
}
|
||||
@ -139,15 +83,8 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
|
||||
typename T::Type cmp = a->val_dont_use;
|
||||
typename T::Type cur;
|
||||
for (;;) {
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
typename T::Type volatile *val_ptr =
|
||||
const_cast<typename T::Type volatile *>(&a->val_dont_use);
|
||||
cur = __mips_sync_val_compare_and_swap<u64>(
|
||||
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
|
||||
#else
|
||||
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
|
||||
#endif
|
||||
if (cmp == v)
|
||||
if (cur == cmp || cur == v)
|
||||
break;
|
||||
cmp = cur;
|
||||
}
|
||||
|
@ -59,8 +59,7 @@ INLINE typename T::Type atomic_load(
|
||||
"emms;" // Empty mmx state/Reset FP regs
|
||||
: "=m" (v)
|
||||
: "m" (a->val_dont_use)
|
||||
: // mark the FP stack and mmx registers as clobbered
|
||||
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
|
||||
: // mark the mmx registers as clobbered
|
||||
#ifdef __MMX__
|
||||
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
|
||||
#endif // #ifdef __MMX__
|
||||
@ -98,8 +97,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
|
||||
"emms;" // Empty mmx state/Reset FP regs
|
||||
: "=m" (a->val_dont_use)
|
||||
: "m" (v)
|
||||
: // mark the FP stack and mmx registers as clobbered
|
||||
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
|
||||
: // mark the mmx registers as clobbered
|
||||
#ifdef __MMX__
|
||||
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
|
||||
#endif // #ifdef __MMX__
|
||||
|
@ -20,7 +20,7 @@ namespace __sanitizer {
|
||||
template <class basic_int_t = uptr>
|
||||
class BasicBitVector {
|
||||
public:
|
||||
enum SizeEnum { kSize = sizeof(basic_int_t) * 8 };
|
||||
enum SizeEnum : uptr { kSize = sizeof(basic_int_t) * 8 };
|
||||
|
||||
uptr size() const { return kSize; }
|
||||
// No CTOR.
|
||||
@ -113,7 +113,7 @@ class TwoLevelBitVector {
|
||||
// This structure allows O(kLevel1Size) time for clear() and empty(),
|
||||
// as well fast handling of sparse BVs.
|
||||
public:
|
||||
enum SizeEnum { kSize = BV::kSize * BV::kSize * kLevel1Size };
|
||||
enum SizeEnum : uptr { kSize = BV::kSize * BV::kSize * kLevel1Size };
|
||||
// No CTOR.
|
||||
|
||||
uptr size() const { return kSize; }
|
||||
|
Some files were not shown because too many files have changed in this diff.