libsanitizer merge from upstream r209283

From-SVN: r210743
Authored by Kostya Serebryany on 2014-05-22 07:09:21 +00:00; committed by Kostya Serebryany.
parent b95591361e
commit dee5ea7a0b
166 changed files with 12587 additions and 5162 deletions


@ -1,3 +1,9 @@
2014-05-22 Kostya Serebryany <kcc@google.com>
* c-c++-common/tsan/mutexset1.c: Update the test to match
upstream r209283.
* g++.dg/asan/symbolize-callback-1.C: Delete the deprecated test.
2014-05-21 Marek Polacek <polacek@redhat.com>
PR sanitizer/61272


@ -35,6 +35,6 @@ int main() {
/* { dg-output "WARNING: ThreadSanitizer: data race.*(\n|\r\n|\r)" } */
/* { dg-output " Read of size 4 at 0x\[0-9a-f\]+ by thread T1 \\(mutexes: write M\[0-9\]\\):.*" } */
/* { dg-output " Previous write of size 4 at 0x\[0-9a-f\]+ by thread T2:.*" } */
/* { dg-output " Mutex M\[0-9\] created at:.*" } */
/* { dg-output " Mutex M\[0-9\] \\(0x.*\\) created at:.*" } */
/* { dg-output " #0 pthread_mutex_init.*" } */
/* { dg-output " #1 main (.*mutexset1.c|\\?{2}):\[0-9]+.*" } */


@ -1,21 +0,0 @@
// { dg-do run }
// { dg-skip-if "" { *-*-* } { "*" } { "-O2" } }
// { dg-options "-fno-builtin-malloc -fno-builtin-free" }
// { dg-shouldfail "asan" }
#include <stdio.h>
#include <stdlib.h>
extern "C"
bool __asan_symbolize(const void *, char *out_buffer, int out_size) {
snprintf(out_buffer, out_size, "MySymbolizer");
return true;
}
int main() {
char *x = (char*)malloc(10);
free(x);
return x[5];
}
// { dg-output "MySymbolizer" }


@ -1,3 +1,13 @@
2014-05-22 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r209283.
* asan/Makefile.am (asan_files): Added new files.
* asan/Makefile.in: Regenerate.
* tsan/Makefile.am (tsan_files): Added new files.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
* sanitizer_common/Makefile.in: Regenerate.
2014-05-14 Yury Gribov <y.gribov@samsung.com>
PR sanitizer/61100


@ -1,4 +1,4 @@
196489
209283
The first line of this file holds the svn revision number of the
last merge done from the master library sources.


@ -15,6 +15,7 @@ toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_activation.cc \
asan_allocator2.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \


@ -88,12 +88,13 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
am__objects_1 = asan_activation.lo asan_allocator2.lo \
asan_dll_thunk.lo asan_fake_stack.lo asan_globals.lo \
asan_interceptors.lo asan_linux.lo asan_mac.lo \
asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
asan_thread.lo asan_win.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@ -272,6 +273,7 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_activation.cc \
asan_allocator2.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
@ -412,6 +414,7 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@


@ -0,0 +1,72 @@
//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan activation/deactivation logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static struct AsanDeactivatedFlags {
int quarantine_size;
int max_redzone;
int malloc_context_size;
bool poison_heap;
} asan_deactivated_flags;
static bool asan_is_deactivated;
void AsanStartDeactivated() {
VReport(1, "Deactivating ASan\n");
// Save flag values.
asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
asan_deactivated_flags.max_redzone = flags()->max_redzone;
asan_deactivated_flags.poison_heap = flags()->poison_heap;
asan_deactivated_flags.malloc_context_size =
common_flags()->malloc_context_size;
flags()->quarantine_size = 0;
flags()->max_redzone = 16;
flags()->poison_heap = false;
common_flags()->malloc_context_size = 0;
asan_is_deactivated = true;
}
void AsanActivate() {
if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n");
// Restore flag values.
// FIXME: this is not atomic, and there may be other threads alive.
flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
flags()->max_redzone = asan_deactivated_flags.max_redzone;
flags()->poison_heap = asan_deactivated_flags.poison_heap;
common_flags()->malloc_context_size =
asan_deactivated_flags.malloc_context_size;
ParseExtraActivationFlags();
ReInitializeAllocator();
asan_is_deactivated = false;
VReport(
1,
"quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
"%d\n",
flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
common_flags()->malloc_context_size);
}
} // namespace __asan


@ -0,0 +1,21 @@
//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan activation/deactivation logic.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_H
#define ASAN_ACTIVATION_H
namespace __asan {
void AsanStartDeactivated();
void AsanActivate();
} // namespace __asan
#endif // ASAN_ACTIVATION_H


@ -15,6 +15,7 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
namespace __asan {
@ -29,6 +30,7 @@ static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
void InitializeAllocator();
void ReInitializeAllocator();
class AsanChunkView {
public:
@ -40,6 +42,7 @@ class AsanChunkView {
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
void GetAllocStack(StackTrace *stack);
void GetFreeStack(StackTrace *stack);
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
@ -88,9 +91,46 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
uptr size_;
};
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
void OnUnmap(uptr p, uptr size) const;
};
#if SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
# if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<kNumRegions> ByteMap;
# elif SANITIZER_WORDSIZE == 64
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
# endif
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
SizeClassMap, kRegionSizeLog,
ByteMap,
AsanMapUnmapCallback> PrimaryAllocator;
#endif // SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
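The ByteMap choice above comes down to how many kRegionSizeLog-sized regions the mmap range splits into. A small sizing sketch (the range constants are assumptions matching the usual sanitizer definitions, 1ULL << 32 on 32-bit targets and 1ULL << 47 on 64-bit ones):

    #include <cstdint>

    static const uint64_t kMmapRange32 = 1ull << 32;   // assumed 32-bit range
    static const uint64_t kMmapRange64 = 1ull << 47;   // assumed 64-bit range
    static const uint64_t kRegionLog = 20;             // 1 MiB regions, as above

    // 32-bit: 4096 regions -> a flat 4 KiB byte map is fine.
    static_assert((kMmapRange32 >> kRegionLog) == 4096, "flat map stays small");
    // 64-bit: 2^27 regions -> too big for a flat array, hence the
    // TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> used above.
    static_assert((kMmapRange64 >> kRegionLog) == (1ull << 27), "needs two levels");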
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
AllocatorCache allocator2_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.


@ -17,8 +17,8 @@
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
@ -28,65 +28,30 @@
namespace __asan {
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
};
#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
#endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
SizeClassMap, kRegionSizeLog,
FlatByteMap<kFlatByteMapSize>,
AsanMapUnmapCallback> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
FlushUnneededASanShadowMemory(p, size);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
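FlushUnneededASanShadowMemory now encapsulates the page rounding that the removed inline code spelled out: because the shadow mapping is compacting, the shadow of the unmapped chunk need not be page-aligned, and only whole shadow pages can be released. A sketch of that computation (the offset and helper names are illustrative assumptions, not the runtime's internals):

    #include <cstdint>

    static const uintptr_t kShadowOffset = 0x7fff8000;  // x86_64 value, for illustration
    static uintptr_t MemToShadowSketch(uintptr_t a) { return (a >> 3) + kShadowOffset; }
    static uintptr_t RoundUp(uintptr_t x, uintptr_t b) { return (x + b - 1) & ~(b - 1); }
    static uintptr_t RoundDown(uintptr_t x, uintptr_t b) { return x & ~(b - 1); }

    // Page-aligned shadow sub-range that may be released when [p, p + size)
    // is unmapped; the runtime would then madvise(MADV_DONTNEED) it if non-empty.
    static void FlushableShadowRange(uintptr_t p, uintptr_t size, uintptr_t page,
                                     uintptr_t *beg, uintptr_t *end) {
      *beg = RoundUp(MemToShadowSketch(p), page);
      *end = RoundDown(MemToShadowSketch(p + size), page);
    }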
// We cannot use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
return &ms->allocator2_cache;
}
static Allocator allocator;
@ -132,7 +97,8 @@ static uptr ComputeRZLog(uptr user_requested_size) {
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
return Max(rz_log, RZSize2Log(flags()->redzone));
return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
RZSize2Log(flags()->max_redzone));
}
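To see the new clamp in action, a minimal standalone sketch (the flag values and the RZSize2Log stand-in are assumptions for illustration; the real threshold ladder also covers sizes below 1 << 14):

    #include <algorithm>
    #include <cstdio>

    // Stand-in: maps a power-of-two redzone size in [16, 2048] to its log,
    // 16 -> 0, 32 -> 1, ..., 2048 -> 7.
    static unsigned RZSize2LogSketch(unsigned rz_size) {
      unsigned log = 0;
      while ((16u << log) < rz_size) ++log;
      return log;
    }

    int main() {
      unsigned long user_requested_size = 100000;  // tail of the ladder: rz_log == 7
      unsigned rz_log =
          user_requested_size <= (1 << 14) - 256  ? 4 :
          user_requested_size <= (1 << 15) - 512  ? 5 :
          user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
      unsigned redzone = 16, max_redzone = 64;     // example flag values
      unsigned clamped = std::min(std::max(rz_log, RZSize2LogSketch(redzone)),
                                  RZSize2LogSketch(max_redzone));
      // Prints: rz_log=7 -> clamped=2 (64-byte redzone): max_redzone caps it.
      std::printf("rz_log=%u -> clamped=%u (%u-byte redzone)\n",
                  rz_log, clamped, 16u << clamped);
      return 0;
    }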
// The memory chunk allocated from the underlying allocator looks like this:
@ -307,10 +273,14 @@ void InitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
void ReInitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocType alloc_type, bool can_fill) {
if (!asan_inited)
__asan_init();
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@ -355,6 +325,16 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
// chunk. This is possible if flags()->poison_heap was disabled for some
// time, for example, due to flags()->start_deactivated.
// Anyway, poison the block before using it for anything else.
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
@ -708,8 +688,12 @@ uptr PointsIntoChunk(void* p) {
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) &&
m->AddrIsInside(addr, /*locked_version=*/true))
if (m->chunk_state != __asan::CHUNK_ALLOCATED)
return 0;
if (m->AddrIsInside(addr, /*locked_version=*/true))
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
addr))
return chunk;
return 0;
}
@ -778,7 +762,7 @@ uptr __asan_get_estimated_allocated_size(uptr size) {
return size;
}
bool __asan_get_ownership(const void *p) {
int __asan_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}


@ -0,0 +1,599 @@
// This file was generated by gen_asm_instrumentation.sh. Please do not edit.
.section .text
#if defined(__x86_64__) || defined(__i386__)
.globl __asan_report_store1
.globl __asan_report_load1
.globl __asan_report_store2
.globl __asan_report_load2
.globl __asan_report_store4
.globl __asan_report_load4
.globl __asan_report_store8
.globl __asan_report_load8
.globl __asan_report_store16
.globl __asan_report_load16
#endif // defined(__x86_64__) || defined(__i386__)
#if defined(__i386__)
// Sanitize 1-byte store. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_store1
.type __sanitizer_sanitize_store1, @function
__sanitizer_sanitize_store1:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushl %edx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
movb 0x20000000(%ecx), %cl
testb %cl, %cl
je .sanitize_store1_done
movl %eax, %edx
andl $0x7, %edx
movsbl %cl, %ecx
cmpl %ecx, %edx
jl .sanitize_store1_done
pushl %eax
cld
emms
call __asan_report_store1@PLT
.sanitize_store1_done:
popfl
popl %edx
popl %ecx
popl %eax
leave
ret
// Sanitize 1-byte load. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_load1
.type __sanitizer_sanitize_load1, @function
__sanitizer_sanitize_load1:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushl %edx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
movb 0x20000000(%ecx), %cl
testb %cl, %cl
je .sanitize_load1_done
movl %eax, %edx
andl $0x7, %edx
movsbl %cl, %ecx
cmpl %ecx, %edx
jl .sanitize_load1_done
pushl %eax
cld
emms
call __asan_report_load1@PLT
.sanitize_load1_done:
popfl
popl %edx
popl %ecx
popl %eax
leave
ret
// Sanitize 2-byte store. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_store2
.type __sanitizer_sanitize_store2, @function
__sanitizer_sanitize_store2:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushl %edx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
movb 0x20000000(%ecx), %cl
testb %cl, %cl
je .sanitize_store2_done
movl %eax, %edx
andl $0x7, %edx
incl %edx
movsbl %cl, %ecx
cmpl %ecx, %edx
jl .sanitize_store2_done
pushl %eax
cld
emms
call __asan_report_store2@PLT
.sanitize_store2_done:
popfl
popl %edx
popl %ecx
popl %eax
leave
ret
// Sanitize 2-byte load. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_load2
.type __sanitizer_sanitize_load2, @function
__sanitizer_sanitize_load2:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushl %edx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
movb 0x20000000(%ecx), %cl
testb %cl, %cl
je .sanitize_load2_done
movl %eax, %edx
andl $0x7, %edx
incl %edx
movsbl %cl, %ecx
cmpl %ecx, %edx
jl .sanitize_load2_done
pushl %eax
cld
emms
call __asan_report_load2@PLT
.sanitize_load2_done:
popfl
popl %edx
popl %ecx
popl %eax
leave
ret
// Sanitize 4-byte store. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_store4
.type __sanitizer_sanitize_store4, @function
__sanitizer_sanitize_store4:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushl %edx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
movb 0x20000000(%ecx), %cl
testb %cl, %cl
je .sanitize_store4_done
movl %eax, %edx
andl $0x7, %edx
addl $0x3, %edx
movsbl %cl, %ecx
cmpl %ecx, %edx
jl .sanitize_store4_done
pushl %eax
cld
emms
call __asan_report_store4@PLT
.sanitize_store4_done:
popfl
popl %edx
popl %ecx
popl %eax
leave
ret
// Sanitize 4-byte load. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_load4
.type __sanitizer_sanitize_load4, @function
__sanitizer_sanitize_load4:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushl %edx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
movb 0x20000000(%ecx), %cl
testb %cl, %cl
je .sanitize_load4_done
movl %eax, %edx
andl $0x7, %edx
addl $0x3, %edx
movsbl %cl, %ecx
cmpl %ecx, %edx
jl .sanitize_load4_done
pushl %eax
cld
emms
call __asan_report_load4@PLT
.sanitize_load4_done:
popfl
popl %edx
popl %ecx
popl %eax
leave
ret
// Sanitize 8-byte store. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_store8
.type __sanitizer_sanitize_store8, @function
__sanitizer_sanitize_store8:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
cmpb $0x0, 0x20000000(%ecx)
je .sanitize_store8_done
pushl %eax
cld
emms
call __asan_report_store8@PLT
.sanitize_store8_done:
popfl
popl %ecx
popl %eax
leave
ret
// Sanitize 8-byte load. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_load8
.type __sanitizer_sanitize_load8, @function
__sanitizer_sanitize_load8:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
cmpb $0x0, 0x20000000(%ecx)
je .sanitize_load8_done
pushl %eax
cld
emms
call __asan_report_load8@PLT
.sanitize_load8_done:
popfl
popl %ecx
popl %eax
leave
ret
// Sanitize 16-byte store. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_store16
.type __sanitizer_sanitize_store16, @function
__sanitizer_sanitize_store16:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
cmpw $0x0, 0x20000000(%ecx)
je .sanitize_store16_done
pushl %eax
cld
emms
call __asan_report_store16@PLT
.sanitize_store16_done:
popfl
popl %ecx
popl %eax
leave
ret
// Sanitize 16-byte load. Takes one 4-byte address as an argument on
// stack, nothing is returned.
.globl __sanitizer_sanitize_load16
.type __sanitizer_sanitize_load16, @function
__sanitizer_sanitize_load16:
pushl %ebp
movl %esp, %ebp
pushl %eax
pushl %ecx
pushfl
movl 8(%ebp), %eax
movl %eax, %ecx
shrl $0x3, %ecx
cmpw $0x0, 0x20000000(%ecx)
je .sanitize_load16_done
pushl %eax
cld
emms
call __asan_report_load16@PLT
.sanitize_load16_done:
popfl
popl %ecx
popl %eax
leave
ret
#endif // defined(__i386__)
#if defined(__x86_64__)
// Sanitize 1-byte store. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_store1
.type __sanitizer_sanitize_store1, @function
__sanitizer_sanitize_store1:
leaq -128(%rsp), %rsp
pushq %rax
pushq %rcx
pushfq
movq %rdi, %rax
shrq $0x3, %rax
movb 0x7fff8000(%rax), %al
test %al, %al
je .sanitize_store1_done
movl %edi, %ecx
andl $0x7, %ecx
movsbl %al, %eax
cmpl %eax, %ecx
jl .sanitize_store1_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_store1@PLT
.sanitize_store1_done:
popfq
popq %rcx
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 1-byte load. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_load1
.type __sanitizer_sanitize_load1, @function
__sanitizer_sanitize_load1:
leaq -128(%rsp), %rsp
pushq %rax
pushq %rcx
pushfq
movq %rdi, %rax
shrq $0x3, %rax
movb 0x7fff8000(%rax), %al
test %al, %al
je .sanitize_load1_done
movl %edi, %ecx
andl $0x7, %ecx
movsbl %al, %eax
cmpl %eax, %ecx
jl .sanitize_load1_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_load1@PLT
.sanitize_load1_done:
popfq
popq %rcx
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 2-byte store. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_store2
.type __sanitizer_sanitize_store2, @function
__sanitizer_sanitize_store2:
leaq -128(%rsp), %rsp
pushq %rax
pushq %rcx
pushfq
movq %rdi, %rax
shrq $0x3, %rax
movb 0x7fff8000(%rax), %al
test %al, %al
je .sanitize_store2_done
movl %edi, %ecx
andl $0x7, %ecx
incl %ecx
movsbl %al, %eax
cmpl %eax, %ecx
jl .sanitize_store2_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_store2@PLT
.sanitize_store2_done:
popfq
popq %rcx
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 2-byte load. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_load2
.type __sanitizer_sanitize_load2, @function
__sanitizer_sanitize_load2:
leaq -128(%rsp), %rsp
pushq %rax
pushq %rcx
pushfq
movq %rdi, %rax
shrq $0x3, %rax
movb 0x7fff8000(%rax), %al
test %al, %al
je .sanitize_load2_done
movl %edi, %ecx
andl $0x7, %ecx
incl %ecx
movsbl %al, %eax
cmpl %eax, %ecx
jl .sanitize_load2_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_load2@PLT
.sanitize_load2_done:
popfq
popq %rcx
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 4-byte store. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_store4
.type __sanitizer_sanitize_store4, @function
__sanitizer_sanitize_store4:
leaq -128(%rsp), %rsp
pushq %rax
pushq %rcx
pushfq
movq %rdi, %rax
shrq $0x3, %rax
movb 0x7fff8000(%rax), %al
test %al, %al
je .sanitize_store4_done
movl %edi, %ecx
andl $0x7, %ecx
addl $0x3, %ecx
movsbl %al, %eax
cmpl %eax, %ecx
jl .sanitize_store4_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_store4@PLT
.sanitize_store4_done:
popfq
popq %rcx
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 4-byte load. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_load4
.type __sanitizer_sanitize_load4, @function
__sanitizer_sanitize_load4:
leaq -128(%rsp), %rsp
pushq %rax
pushq %rcx
pushfq
movq %rdi, %rax
shrq $0x3, %rax
movb 0x7fff8000(%rax), %al
test %al, %al
je .sanitize_load4_done
movl %edi, %ecx
andl $0x7, %ecx
addl $0x3, %ecx
movsbl %al, %eax
cmpl %eax, %ecx
jl .sanitize_load4_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_load4@PLT
.sanitize_load4_done:
popfq
popq %rcx
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 8-byte store. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_store8
.type __sanitizer_sanitize_store8, @function
__sanitizer_sanitize_store8:
leaq -128(%rsp), %rsp
pushq %rax
pushfq
movq %rdi, %rax
shrq $0x3, %rax
cmpb $0x0, 0x7fff8000(%rax)
je .sanitize_store8_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_store8@PLT
.sanitize_store8_done:
popfq
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 8-byte load. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_load8
.type __sanitizer_sanitize_load8, @function
__sanitizer_sanitize_load8:
leaq -128(%rsp), %rsp
pushq %rax
pushfq
movq %rdi, %rax
shrq $0x3, %rax
cmpb $0x0, 0x7fff8000(%rax)
je .sanitize_load8_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_load8@PLT
.sanitize_load8_done:
popfq
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 16-byte store. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_store16
.type __sanitizer_sanitize_store16, @function
__sanitizer_sanitize_store16:
leaq -128(%rsp), %rsp
pushq %rax
pushfq
movq %rdi, %rax
shrq $0x3, %rax
cmpw $0x0, 0x7fff8000(%rax)
je .sanitize_store16_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_store16@PLT
.sanitize_store16_done:
popfq
popq %rax
leaq 128(%rsp), %rsp
ret
// Sanitize 16-byte load. Takes one 8-byte address as an argument in %rdi,
// nothing is returned.
.globl __sanitizer_sanitize_load16
.type __sanitizer_sanitize_load16, @function
__sanitizer_sanitize_load16:
leaq -128(%rsp), %rsp
pushq %rax
pushfq
movq %rdi, %rax
shrq $0x3, %rax
cmpw $0x0, 0x7fff8000(%rax)
je .sanitize_load16_done
subq $8, %rsp
andq $-16, %rsp
cld
emms
call __asan_report_load16@PLT
.sanitize_load16_done:
popfq
popq %rax
leaq 128(%rsp), %rsp
ret
#endif // defined(__x86_64__)
/* We do not need executable stack. */
#if defined(__arm__)
.section .note.GNU-stack,"",%progbits
#else
.section .note.GNU-stack,"",@progbits
#endif // defined(__arm__)
#endif // __linux__
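In C terms, every routine above performs the same shadow check. A hedged sketch of the 4-byte load case, using the shadow offsets visible in the assembly (0x20000000 on i386, 0x7fff8000 on x86_64):

    #include <cstdint>

    extern "C" void __asan_report_load4(uintptr_t addr);  // provided by the runtime

    static void sanitize_load4_sketch(uintptr_t addr) {
      const uintptr_t kShadowOffset = 0x7fff8000;  // x86_64 shadow base, as above
      int8_t shadow = *reinterpret_cast<int8_t *>((addr >> 3) + kShadowOffset);
      if (shadow == 0) return;  // granule fully addressable: fast path
      // Partially addressable granule: shadow holds the count of addressable
      // bytes. The access is bad if its last byte reaches that count or past it
      // (movsbl/cmpl/jl in the assembly is exactly this signed comparison).
      int last_accessed_byte = static_cast<int>(addr & 7) + 4 - 1;
      if (last_accessed_byte >= shadow)
        __asan_report_load4(addr);
    }
    // 8- and 16-byte accesses skip the offset math entirely: any nonzero shadow
    // byte (or 16-bit word) is an error, hence the cmpb/cmpw-only variants above.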


@ -18,6 +18,7 @@
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "sanitizer_common/sanitizer_interception.h"
// ----------------- Helper functions and macros --------------------- {{{1
extern "C" {
@ -113,7 +114,50 @@ static void *getRealProcAddressOrDie(const char *name) {
}
// }}}
// --------- Interface interception helper functions and macros ----------- {{{1
// We need to intercept the ASan interface exported by the DLL thunk and forward
// all the functions to the runtime in the main module.
// However, we don't want to keep two lists of interface functions.
// To avoid that, the list of interface functions should be defined using the
// INTERFACE_FUNCTION macro. Then all the interface functions can be intercepted at once
// by calling INTERCEPT_ASAN_INTERFACE().
// Use macro+template magic to automatically generate the list of interface
// functions. Each interface function at line LINE defines a template class
// with a static InterfaceInteceptor<LINE>::Execute() method intercepting the
// function. The default implementation of InterfaceInteceptor<LINE> is to call
// the Execute() method corresponding to the previous line.
template<int LINE>
struct InterfaceInteceptor {
static void Execute() { InterfaceInteceptor<LINE-1>::Execute(); }
};
// There shouldn't be any interface function with a negative line number.
template<>
struct InterfaceInteceptor<0> {
static void Execute() {}
};
#define INTERFACE_FUNCTION(name) \
extern "C" void name() { __debugbreak(); } \
template<> struct InterfaceInteceptor<__LINE__> { \
static void Execute() { \
void *wrapper = getRealProcAddressOrDie(#name); \
if (!__interception::OverrideFunction((uptr)name, (uptr)wrapper, 0)) \
abort(); \
InterfaceInteceptor<__LINE__-1>::Execute(); \
} \
};
// INTERCEPT_ASAN_INTERFACE must be used after the last INTERFACE_FUNCTION.
#define INTERCEPT_ASAN_INTERFACE InterfaceInteceptor<__LINE__>::Execute
static void InterceptASanInterface();
// }}}
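The chain is easiest to see in miniature. A self-contained model of the __LINE__-keyed dispatch (names here are illustrative, not the thunk's):

    #include <cstdio>

    template <int LINE> struct Chain {
      // Default case: nothing registered at this line; keep walking down.
      static void Execute() { Chain<LINE - 1>::Execute(); }
    };
    template <> struct Chain<0> {
      static void Execute() {}  // base case stops the walk
    };

    // Each use specializes Chain at its own source line, then chains onward.
    #define REGISTER(msg)                          \
      template <> struct Chain<__LINE__> {         \
        static void Execute() {                    \
          std::puts(msg);                          \
          Chain<__LINE__ - 1>::Execute();          \
        }                                          \
      };

    REGISTER("intercept foo")
    REGISTER("intercept bar")

    int main() {
      Chain<__LINE__>::Execute();  // visits "intercept bar", then "intercept foo"
      return 0;
    }

One Execute() call near the bottom of the file thus visits every registered entry, in reverse order of registration, without maintaining a second list of interface functions.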
// ----------------- ASan own interface functions --------------------
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
extern "C" {
@ -123,54 +167,75 @@ extern "C" {
// __asan_option_detect_stack_use_after_return afterwards.
void __asan_init_v3() {
typedef void (*fntype)();
static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
static fntype fn = 0;
if (fn) return;
fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
InterceptASanInterface();
}
}
WRAP_V_V(__asan_handle_no_return)
INTERFACE_FUNCTION(__asan_handle_no_return)
WRAP_V_W(__asan_report_store1)
WRAP_V_W(__asan_report_store2)
WRAP_V_W(__asan_report_store4)
WRAP_V_W(__asan_report_store8)
WRAP_V_W(__asan_report_store16)
WRAP_V_WW(__asan_report_store_n)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
INTERFACE_FUNCTION(__asan_report_store4)
INTERFACE_FUNCTION(__asan_report_store8)
INTERFACE_FUNCTION(__asan_report_store16)
INTERFACE_FUNCTION(__asan_report_store_n)
WRAP_V_W(__asan_report_load1)
WRAP_V_W(__asan_report_load2)
WRAP_V_W(__asan_report_load4)
WRAP_V_W(__asan_report_load8)
WRAP_V_W(__asan_report_load16)
WRAP_V_WW(__asan_report_load_n)
INTERFACE_FUNCTION(__asan_report_load1)
INTERFACE_FUNCTION(__asan_report_load2)
INTERFACE_FUNCTION(__asan_report_load4)
INTERFACE_FUNCTION(__asan_report_load8)
INTERFACE_FUNCTION(__asan_report_load16)
INTERFACE_FUNCTION(__asan_report_load_n)
WRAP_V_WW(__asan_register_globals)
WRAP_V_WW(__asan_unregister_globals)
INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
WRAP_W_WW(__asan_stack_malloc_0)
WRAP_W_WW(__asan_stack_malloc_1)
WRAP_W_WW(__asan_stack_malloc_2)
WRAP_W_WW(__asan_stack_malloc_3)
WRAP_W_WW(__asan_stack_malloc_4)
WRAP_W_WW(__asan_stack_malloc_5)
WRAP_W_WW(__asan_stack_malloc_6)
WRAP_W_WW(__asan_stack_malloc_7)
WRAP_W_WW(__asan_stack_malloc_8)
WRAP_W_WW(__asan_stack_malloc_9)
WRAP_W_WW(__asan_stack_malloc_10)
INTERFACE_FUNCTION(__asan_register_globals)
INTERFACE_FUNCTION(__asan_unregister_globals)
WRAP_V_WWW(__asan_stack_free_0)
WRAP_V_WWW(__asan_stack_free_1)
WRAP_V_WWW(__asan_stack_free_2)
WRAP_V_WWW(__asan_stack_free_4)
WRAP_V_WWW(__asan_stack_free_5)
WRAP_V_WWW(__asan_stack_free_6)
WRAP_V_WWW(__asan_stack_free_7)
WRAP_V_WWW(__asan_stack_free_8)
WRAP_V_WWW(__asan_stack_free_9)
WRAP_V_WWW(__asan_stack_free_10)
INTERFACE_FUNCTION(__asan_before_dynamic_init)
INTERFACE_FUNCTION(__asan_after_dynamic_init)
INTERFACE_FUNCTION(__asan_poison_stack_memory)
INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
INTERFACE_FUNCTION(__asan_poison_memory_region)
INTERFACE_FUNCTION(__asan_unpoison_memory_region)
INTERFACE_FUNCTION(__asan_get_current_fake_stack)
INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
INTERFACE_FUNCTION(__asan_stack_malloc_0)
INTERFACE_FUNCTION(__asan_stack_malloc_1)
INTERFACE_FUNCTION(__asan_stack_malloc_2)
INTERFACE_FUNCTION(__asan_stack_malloc_3)
INTERFACE_FUNCTION(__asan_stack_malloc_4)
INTERFACE_FUNCTION(__asan_stack_malloc_5)
INTERFACE_FUNCTION(__asan_stack_malloc_6)
INTERFACE_FUNCTION(__asan_stack_malloc_7)
INTERFACE_FUNCTION(__asan_stack_malloc_8)
INTERFACE_FUNCTION(__asan_stack_malloc_9)
INTERFACE_FUNCTION(__asan_stack_malloc_10)
INTERFACE_FUNCTION(__asan_stack_free_0)
INTERFACE_FUNCTION(__asan_stack_free_1)
INTERFACE_FUNCTION(__asan_stack_free_2)
INTERFACE_FUNCTION(__asan_stack_free_4)
INTERFACE_FUNCTION(__asan_stack_free_5)
INTERFACE_FUNCTION(__asan_stack_free_6)
INTERFACE_FUNCTION(__asan_stack_free_7)
INTERFACE_FUNCTION(__asan_stack_free_8)
INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10)
// TODO(timurrrr): Add more interface functions on an as-needed basis.
@ -190,7 +255,16 @@ WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
void InterceptASanInterface() {
INTERCEPT_ASAN_INTERFACE();
}
#endif // ASAN_DLL_THUNK


@ -40,21 +40,32 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
uptr size = RequiredSize(stack_size_log);
FakeStack *res = reinterpret_cast<FakeStack *>(
MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
: MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
if (common_flags()->verbosity) {
u8 *p = reinterpret_cast<u8 *>(res);
Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
GetCurrentTidOrInvalid(), p,
p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
}
u8 *p = reinterpret_cast<u8 *>(res);
VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d \n",
GetCurrentTidOrInvalid(), p,
p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
size >> 10, flags()->uar_noreserve);
return res;
}
void FakeStack::Destroy() {
void FakeStack::Destroy(int tid) {
PoisonAll(0);
UnmapOrDie(this, RequiredSize(stack_size_log_));
if (common_flags()->verbosity >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
NumberOfFrames(stack_size_log(), class_id));
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
}
uptr size = RequiredSize(stack_size_log_);
FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
UnmapOrDie(this, size);
}
void FakeStack::PoisonAll(u8 magic) {
@ -91,7 +102,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
return 0; // We are out of fake stack.
}
uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
@ -101,7 +112,10 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
return base + pos * BytesInSizeClass(class_id);
uptr res = base + pos * BytesInSizeClass(class_id);
*frame_end = res + BytesInSizeClass(class_id);
*frame_beg = res + sizeof(FakeFrame);
return res;
}
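A worked instance of the frame lookup above (a sketch; kMinStackFrameSizeLog = 6, i.e. 64-byte class-0 frames, is an assumption consistent with BytesInSizeClass):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kMinStackFrameSizeLog = 6;  // assumed: 64-byte class-0 frames
      uintptr_t class_id = 2;                     // 64 << 2 == 256-byte frames
      uintptr_t bytes = uintptr_t(1) << (kMinStackFrameSizeLog + class_id);
      uintptr_t base = 0x100000;
      uintptr_t ptr = base + 3 * bytes + 40;      // 40 bytes into frame #3
      uintptr_t pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
      uintptr_t frame_beg = base + pos * bytes;
      // Prints: pos=3 frame=[0x100300, 0x100400)
      std::printf("pos=%lu frame=[%#lx, %#lx)\n", (unsigned long)pos,
                  (unsigned long)frame_beg, (unsigned long)(frame_beg + bytes));
      return 0;
    }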
void FakeStack::HandleNoReturn() {
@ -195,14 +209,15 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
return __asan::OnMalloc(class_id, size, real_stack); \
return OnMalloc(class_id, size, real_stack); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \
__asan::OnFree(ptr, class_id, size, real_stack); \
OnFree(ptr, class_id, size, real_stack); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
@ -216,3 +231,23 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end) {
FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
if (!fs) return 0;
uptr frame_beg, frame_end;
FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
if (!frame) return 0;
if (frame->magic != kCurrentStackFrameMagic)
return 0;
if (beg) *beg = reinterpret_cast<void*>(frame_beg);
if (end) *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack);
}
} // extern "C"


@ -63,7 +63,7 @@ class FakeStack {
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
void Destroy();
void Destroy(int tid);
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
@ -127,7 +127,11 @@ class FakeStack {
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
uptr AddrIsInFakeStack(uptr addr);
uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
USED uptr AddrIsInFakeStack(uptr addr) {
uptr t1, t2;
return AddrIsInFakeStack(addr, &t1, &t2);
}
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {


@ -26,88 +26,42 @@
namespace __asan {
struct Flags {
// Size (in bytes) of quarantine used to detect use-after-free errors.
// Lower value may reduce memory usage but increase the chance of
// false negatives.
// Flag descriptions are in asan_rtl.cc.
int quarantine_size;
// Size (in bytes) of redzones around heap objects.
// Requirement: redzone >= 32, is a power of two.
int redzone;
// If set, prints some debugging information and does additional checks.
int max_redzone;
bool debug;
// Controls the way to handle globals (0 - don't detect buffer overflow
// on globals, 1 - detect buffer overflow, 2 - print data about registered
// globals).
int report_globals;
// If set, attempts to catch initialization order issues.
bool check_initialization_order;
// If set, uses custom wrappers and replacements for libc string functions
// to find more errors.
bool replace_str;
// If set, uses custom wrappers for memset/memcpy/memmove intinsics.
bool replace_intrin;
// Used on Mac only.
bool mac_ignore_invalid_free;
// Enables stack-use-after-return checking at run-time.
bool detect_stack_use_after_return;
// The minimal fake stack size log.
int uar_stack_size_log;
// ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes
// that will be filled with malloc_fill_byte on malloc.
int min_uar_stack_size_log;
int max_uar_stack_size_log;
bool uar_noreserve;
int max_malloc_fill_size, malloc_fill_byte;
// Override exit status if something was reported.
int exitcode;
// If set, user may manually mark memory regions as poisoned or unpoisoned.
bool allow_user_poisoning;
// Number of seconds to sleep between printing an error report and
// terminating application. Useful for debug purposes (when one needs
// to attach gdb, for example).
int sleep_before_dying;
// If set, registers ASan custom segv handler.
bool handle_segv;
// If set, allows user register segv handler even if ASan registers one.
bool allow_user_segv_handler;
// If set, uses alternate stack for signal handling.
bool use_sigaltstack;
// Allow the users to work around the bug in Nvidia drivers prior to 295.*.
bool check_malloc_usable_size;
// If set, explicitly unmaps (huge) shadow at exit.
bool unmap_shadow_on_exit;
// If set, calls abort() instead of _exit() after printing an error report.
bool abort_on_error;
// Print various statistics after printing an error message or if atexit=1.
bool print_stats;
// Print the legend for the shadow bytes.
bool print_legend;
// If set, prints ASan exit stats even after program terminates successfully.
bool atexit;
// If set, coverage information will be dumped at shutdown time if the
// appropriate instrumentation was enabled.
bool coverage;
// By default, disable core dumper on 64-bit - it makes little sense
// to dump 16T+ core.
bool disable_core;
// Allow the tool to re-exec the program. This may interfere badly with the
// debugger.
bool allow_reexec;
// If set, prints not only thread creation stacks for threads in error report,
// but also thread creation stacks for threads that created those threads,
// etc. up to main thread.
bool print_full_thread_history;
// Poison (or not) the heap memory on [de]allocation. Zero value is useful
// for benchmarking the allocator or instrumentator.
bool poison_heap;
// If true, poison partially addressable 8-byte aligned words (default=true).
// This flag affects heap and global buffers, but not stack buffers.
bool poison_partial;
// Report errors on malloc/delete, new/free, new/delete[], etc.
bool alloc_dealloc_mismatch;
// If true, assume that memcmp(p1, p2, n) always reads n bytes before
// comparing p1 and p2.
bool strict_memcmp;
// If true, assume that dynamic initializers can never access globals from
// other modules, even if the latter are already initialized.
bool strict_init_order;
bool start_deactivated;
int detect_invalid_pointer_pairs;
bool detect_container_overflow;
int detect_odr_violation;
};
extern Flags asan_flags_dont_use_directly;


@ -90,6 +90,19 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->detect_odr_violation) {
// Try detecting ODR (One Definition Rule) violation, i.e. the situation
// where two globals with the same name are defined in different modules.
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger,
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size))
ReportODRViolation(g, l->g);
}
}
}
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;


@ -1,77 +0,0 @@
//===-- asan_intercepted_functions.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header containing prototypes for wrapper functions and wrappers
//===----------------------------------------------------------------------===//
#ifndef ASAN_INTERCEPTED_FUNCTIONS_H
#define ASAN_INTERCEPTED_FUNCTIONS_H
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Use a macro to describe whether a specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_MLOCKX 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_MLOCKX 0
#endif
#if SANITIZER_LINUX
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
#endif
#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
#else
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_ATEXIT 1
#else
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
#endif // ASAN_INTERCEPTED_FUNCTIONS_H


@ -12,14 +12,12 @@
#include "asan_interceptors.h"
#include "asan_allocator.h"
#include "asan_intercepted_functions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
@ -43,6 +41,10 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
if (__offset > __offset + __size) { \
GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
} \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
GET_CURRENT_PC_BP_SP; \
@ -70,13 +72,6 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} \
} while (0)
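The new guard rejects ranges whose end wraps past the top of the address space before any shadow is consulted. In isolation (a sketch with a made-up name):

    #include <cstdint>

    // True when offset + size overflows uintptr_t, i.e. the access range
    // [offset, offset + size) wraps around; unsigned overflow is well-defined.
    static bool AccessRangeWraps(uintptr_t offset, uintptr_t size) {
      return offset > offset + size;
    }
    // Example: AccessRangeWraps(UINTPTR_MAX - 8, 64) is true, so the macro
    // above reports a string-function size overflow instead of scanning shadow.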
#define ENSURE_ASAN_INITED() do { \
CHECK(!asan_init_is_running); \
if (!asan_inited) { \
__asan_init(); \
} \
} while (0)
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
if (REAL(strnlen) != 0) {
@ -106,11 +101,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \
do { \
if ((!INTERCEPT_FUNCTION(name) || !REAL(name)) && \
common_flags()->verbosity > 0) \
Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
#define ASAN_INTERCEPT_FUNC(name) \
do { \
if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
@ -118,19 +112,18 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#endif // SANITIZER_MAC
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
do { \
if (asan_init_is_running) return REAL(func)(__VA_ARGS__); \
ctx = 0; \
(void) ctx; \
if (SANITIZER_MAC && !asan_inited) return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
do { \
if (asan_init_is_running) \
return REAL(func)(__VA_ARGS__); \
ctx = 0; \
(void) ctx; \
if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
@ -194,20 +187,41 @@ INTERCEPTOR(int, pthread_create, void *thread,
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
}
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
}
#endif
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
}
namespace __sanitizer {
int real_sigaction(int signum, const void *act, void *oldact) {
return REAL(sigaction)(signum,
(struct sigaction *)act, (struct sigaction *)oldact);
}
} // namespace __sanitizer
#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on POSIX systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
@ -284,10 +298,9 @@ static void MlockIsUnsupported() {
static bool printed = false;
if (printed) return;
printed = true;
if (common_flags()->verbosity > 0) {
Printf("INFO: AddressSanitizer ignores "
"mlock/mlockall/munlock/munlockall\n");
}
VPrintf(1,
"INFO: AddressSanitizer ignores "
"mlock/mlockall/munlock/munlockall\n");
}
INTERCEPTOR(int, mlock, const void *addr, uptr len) {
@ -315,7 +328,7 @@ static inline int CharCmp(unsigned char c1, unsigned char c2) {
}
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
if (!asan_inited) return internal_memcmp(a1, a2, size);
if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
if (flags()->strict_memcmp) {
@ -342,24 +355,8 @@ INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
return REAL(memcmp(a1, a2, size));
}
#define MEMMOVE_BODY { \
if (!asan_inited) return internal_memmove(to, from, size); \
if (asan_init_is_running) { \
return REAL(memmove)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(from, size); \
ASAN_WRITE_RANGE(to, size); \
} \
return internal_memmove(to, from, size); \
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) MEMMOVE_BODY
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
#if !SANITIZER_MAC
if (!asan_inited) return internal_memcpy(to, from, size);
void *__asan_memcpy(void *to, const void *from, uptr size) {
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);
// memcpy is called during __asan_init() from the internals
// of printf(...).
if (asan_init_is_running) {
@ -375,23 +372,11 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
// Interposing of resolver functions is broken on Mac OS 10.7 and 10.8, so
// calling REAL(memcpy) here leads to infinite recursion.
// See also http://code.google.com/p/address-sanitizer/issues/detail?id=116.
return internal_memcpy(to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
// calls. If we just disable error reporting with
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
MEMMOVE_BODY
#endif // !SANITIZER_MAC
return REAL(memcpy)(to, from, size);
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
if (!asan_inited) return internal_memset(block, c, size);
void *__asan_memset(void *block, int c, uptr size) {
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);
// memset is called inside Printf.
if (asan_init_is_running) {
return REAL(memset)(block, c, size);
@ -403,8 +388,41 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
return REAL(memset)(block, c, size);
}
void *__asan_memmove(void *to, const void *from, uptr size) {
if (UNLIKELY(!asan_inited))
return internal_memmove(to, from, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
return internal_memmove(to, from, size);
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
return __asan_memmove(to, from, size);
}
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
#if !SANITIZER_MAC
return __asan_memcpy(to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
// calls. If we just disable error reporting with
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
return __asan_memmove(to, from, size);
#endif // !SANITIZER_MAC
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
return __asan_memset(block, c, size);
}
INTERCEPTOR(char*, strchr, const char *str, int c) {
if (!asan_inited) return internal_strchr(str, c);
if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
if (asan_init_is_running) {
@ -473,7 +491,7 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if SANITIZER_MAC
if (!asan_inited) return REAL(strcpy)(to, from); // NOLINT
if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif
// strcpy is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
@ -492,7 +510,7 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
if (!asan_inited) return internal_strdup(s);
if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
@ -506,7 +524,7 @@ INTERCEPTOR(char*, strdup, const char *s) {
#endif
INTERCEPTOR(uptr, strlen, const char *s) {
if (!asan_inited) return internal_strlen(s);
if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
@ -588,7 +606,7 @@ INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
INTERCEPTOR(int, atoi, const char *nptr) {
#if SANITIZER_MAC
if (!asan_inited) return REAL(atoi)(nptr);
if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@ -607,7 +625,7 @@ INTERCEPTOR(int, atoi, const char *nptr) {
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
#if SANITIZER_MAC
if (!asan_inited) return REAL(atol)(nptr);
if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
@ -664,7 +682,7 @@ static void AtCxaAtexit(void *unused) {
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
if (!asan_inited) return REAL(__cxa_atexit)(func, arg, dso_handle);
if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
#endif
ENSURE_ASAN_INITED();
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
@ -705,7 +723,7 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
was_called_once = true;
SANITIZER_COMMON_INTERCEPTORS_INIT;
InitializeCommonInterceptors();
// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcmp);
@ -753,8 +771,12 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(longjmp);
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
ASAN_INTERCEPT_FUNC(sigaction);
#if SANITIZER_ANDROID
ASAN_INTERCEPT_FUNC(bsd_signal);
#else
ASAN_INTERCEPT_FUNC(signal);
#endif
#endif
#if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext);
#endif
@ -785,9 +807,7 @@ void InitializeAsanInterceptors() {
InitializeWindowsInterceptors();
#endif
if (common_flags()->verbosity > 0) {
Report("AddressSanitizer: libc interceptors initialized\n");
}
VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
} // namespace __asan


@ -13,7 +13,68 @@
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Use a macro to describe whether a specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_MLOCKX 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_MLOCKX 0
#endif
#if SANITIZER_FREEBSD || SANITIZER_LINUX
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
#else
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_ATEXIT 1
#else
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
@ -31,6 +92,13 @@ namespace __asan {
void InitializeAsanInterceptors();
#define ENSURE_ASAN_INITED() do { \
CHECK(!asan_init_is_running); \
if (UNLIKELY(!asan_inited)) { \
AsanInitFromRtl(); \
} \
} while (0)
} // namespace __asan
#endif // ASAN_INTERCEPTORS_H
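
The ENSURE_ASAN_INITED macro above is the backbone of the lazy-init discipline the interceptors follow: a call that arrives before __asan_init must either fall back to a libc-independent implementation or trigger initialization itself. A minimal self-contained sketch of that shape (all names here are stand-ins, not part of this merge):

    #include <stdio.h>
    #include <string.h>

    static bool asan_inited = false;          // stands in for the runtime's flag

    static size_t internal_strlen(const char *s) {  // libc-independent fallback
      size_t i = 0;
      while (s[i]) i++;
      return i;
    }

    static void EnsureAsanInited() {          // stands in for ENSURE_ASAN_INITED()
      if (!asan_inited) asan_inited = true;   // real code calls AsanInitFromRtl()
    }

    static size_t intercepted_strlen(const char *s) {
      if (!asan_inited) return internal_strlen(s);  // too early for shadow checks
      EnsureAsanInited();
      return strlen(s);  // the real interceptor also checks shadow for [s, s+len]
    }

    int main() {
      asan_inited = true;  // pretend the runtime came up
      printf("%zu\n", intercepted_strlen("hello"));
      return 0;
    }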


@ -20,7 +20,7 @@ using __sanitizer::uptr;
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
// Everytime the asan ABI changes we also change the version number in this
// Every time the asan ABI changes we also change the version number in this
// name. Objects built with an incompatible asan ABI version
// will not link with the run-time.
// Changes between ABI versions:
@ -75,7 +75,7 @@ extern "C" {
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
bool __asan_address_is_poisoned(void const volatile *addr);
int __asan_address_is_poisoned(void const volatile *addr);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
@ -85,7 +85,7 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, bool is_write, uptr access_size);
uptr addr, int is_write, uptr access_size);
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
@ -97,14 +97,10 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE int __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
@ -123,6 +119,29 @@ extern "C" {
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr *__asan_test_only_reported_buggy_pointer;
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memmove(void* dest, const void* src, uptr n);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
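
The __asan_load*/__asan_store* entry points added here are the outlined flavor of the instrumentation: rather than inlining the shadow computation at every access, the compiler emits one runtime call per load or store. A hedged illustration of what a 4-byte load reduces to (read_field is hypothetical; the declarations mirror the header above and assume a 64-bit target):

    typedef unsigned long uptr;            // as in the sanitizer headers (64-bit)
    extern "C" void __asan_load4(uptr p);  // provided by the ASan runtime

    int read_field(int *p) {
      __asan_load4((uptr)p);  // compiler-inserted check: reports and aborts
                              // if the shadow marks these 4 bytes poisoned
      return *p;              // the original load
    }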


@ -28,26 +28,11 @@
// Build-time configuration options.
// If set, asan will install its own SEGV signal handler.
#ifndef ASAN_NEEDS_SEGV
# if SANITIZER_ANDROID == 1
# define ASAN_NEEDS_SEGV 0
# else
# define ASAN_NEEDS_SEGV 1
# endif
#endif
// If set, asan will intercept C++ exception API calls.
#ifndef ASAN_HAS_EXCEPTIONS
# define ASAN_HAS_EXCEPTIONS 1
#endif
// If set, asan uses the values of SHADOW_SCALE and SHADOW_OFFSET
// provided by the instrumented objects. Otherwise constants are used.
#ifndef ASAN_FLEXIBLE_MAPPING_AND_OFFSET
# define ASAN_FLEXIBLE_MAPPING_AND_OFFSET 0
#endif
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
@ -62,32 +47,41 @@
# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
#endif
#ifndef ASAN_DYNAMIC
# ifdef PIC
# define ASAN_DYNAMIC 1
# else
# define ASAN_DYNAMIC 0
# endif
#endif
// All internal functions in asan reside inside the __asan namespace
// to avoid namespace collisions with the user programs.
// Seperate namespace also makes it simpler to distinguish the asan run-time
// Separate namespace also makes it simpler to distinguish the asan run-time
// functions from the instrumented user code in a profile.
namespace __asan {
class AsanThread;
using __sanitizer::StackTrace;
void AsanInitFromRtl();
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
void ReplaceOperatorsNewAndDelete();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
// asan_linux.cc / asan_mac.cc / asan_win.cc
void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
void MaybeReexec();
bool AsanInterceptsSignal(int signum);
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
@ -100,7 +94,9 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
// Platfrom-specific options.
void ParseExtraActivationFlags();
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \


@ -11,11 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@ -30,12 +31,41 @@
#include <unistd.h>
#include <unwind.h>
#if !SANITIZER_ANDROID
// FIXME: where to get ucontext on Android?
#include <sys/ucontext.h>
#if SANITIZER_FREEBSD
#include <sys/link_elf.h>
#endif
#if SANITIZER_ANDROID || SANITIZER_FREEBSD
#include <ucontext.h>
extern "C" void* _DYNAMIC;
#else
#include <sys/ucontext.h>
#include <dlfcn.h>
#include <link.h>
#endif
// x86_64 FreeBSD 9.2 and older define 64-bit register names in both 64-bit
// and 32-bit modes.
#if SANITIZER_FREEBSD
#include <sys/param.h>
# if __FreeBSD_version <= 902001 // v9.2
# define mc_eip mc_rip
# define mc_ebp mc_rbp
# define mc_esp mc_rsp
# endif
#endif
typedef enum {
ASAN_RT_VERSION_UNDEFINED = 0,
ASAN_RT_VERSION_DYNAMIC,
ASAN_RT_VERSION_STATIC,
} asan_rt_version_t;
// FIXME: perhaps also store abi version here?
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
asan_rt_version_t __asan_rt_version;
}
namespace __asan {
@ -48,38 +78,115 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if SANITIZER_ANDROID
*pc = *sp = *bp = 0;
#elif defined(__arm__)
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
#else
static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
void *data) {
// Continue until the first dynamic library is found
if (!info->dlpi_name || info->dlpi_name[0] == 0)
return 0;
*(const char **)data = info->dlpi_name;
return 1;
}
static bool IsDynamicRTName(const char *libname) {
return internal_strstr(libname, "libclang_rt.asan") ||
internal_strstr(libname, "libasan.so");
}
static void ReportIncompatibleRT() {
Report("Your application is linked against incompatible ASan runtimes.\n");
Die();
}
void AsanCheckDynamicRTPrereqs() {
// Ensure that dynamic RT is the first DSO in the list
const char *first_dso_name = 0;
dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
Report("ASan runtime does not come first in initial library list; "
"you should either link runtime to your application or "
"manually preload it with LD_PRELOAD.\n");
Die();
}
}
void AsanCheckIncompatibleRT() {
if (ASAN_DYNAMIC) {
if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
__asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
} else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
ReportIncompatibleRT();
}
} else {
if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
// Ensure that dynamic runtime is not present. We should detect it
// as early as possible, otherwise ASan interceptors could bind to
// the functions in dynamic ASan runtime instead of the functions in
// system libraries, causing crashes later in ASan initialization.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
char filename[128];
while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) {
if (IsDynamicRTName(filename)) {
Report("Your application is linked against "
"incompatible ASan runtimes.\n");
Die();
}
}
__asan_rt_version = ASAN_RT_VERSION_STATIC;
} else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
ReportIncompatibleRT();
}
}
}
#endif // SANITIZER_ANDROID
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
# elif defined(__hppa__)
#elif defined(__aarch64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
# elif defined(__x86_64__)
#elif defined(__x86_64__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
# elif defined(__i386__)
# endif
#elif defined(__i386__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_ESP];
# elif defined(__powerpc__) || defined(__powerpc64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
# elif defined(__sparc__)
# endif
#elif defined(__sparc__)
ucontext_t *ucontext = (ucontext_t*)context;
uptr *stk_ptr;
# if defined (__arch64__)
@ -93,7 +200,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
# elif defined(__mips__)
#elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
@ -104,7 +211,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
}
bool AsanInterceptsSignal(int signum) {
return signum == SIGSEGV && flags()->handle_segv;
return signum == SIGSEGV && common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
@ -125,4 +232,4 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
} // namespace __asan
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
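
The AsanCheckDynamicRTPrereqs logic above relies on one dl_iterate_phdr property: the callback sees objects in link-map order, so the first named entry is the first DSO. A standalone sketch of just that trick (assuming glibc/Linux; not the runtime's code):

    #include <link.h>
    #include <stdio.h>

    static int FirstDSO(struct dl_phdr_info *info, size_t size, void *data) {
      (void)size;  // unused
      if (!info->dlpi_name || info->dlpi_name[0] == '\0')
        return 0;                            // unnamed main executable: continue
      *(const char **)data = info->dlpi_name;
      return 1;                              // non-zero stops the iteration
    }

    int main() {
      const char *first = 0;
      dl_iterate_phdr(FirstDSO, &first);
      printf("first DSO: %s\n", first ? first : "(none)");
      return 0;
    }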


@ -15,12 +15,12 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mac.h"
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"
#include <crt_externs.h> // for _NSGetArgv
#include <dlfcn.h> // for dladdr()
@ -51,43 +51,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif // SANITIZER_WORDSIZE
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
MacosVersion GetMacosVersionInternal() {
int mib[2] = { CTL_KERN, KERN_OSRELEASE };
char version[100];
uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
// Get the version length.
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
CHECK_LT(len, maxlen);
CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
switch (version[0]) {
case '9': return MACOS_VERSION_LEOPARD;
case '1': {
switch (version[1]) {
case '0': return MACOS_VERSION_SNOW_LEOPARD;
case '1': return MACOS_VERSION_LION;
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
default: return MACOS_VERSION_UNKNOWN;
}
}
default: return MACOS_VERSION_UNKNOWN;
}
}
MacosVersion GetMacosVersion() {
atomic_uint32_t *cache =
reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
MacosVersion result =
static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
if (result == MACOS_VERSION_UNINITIALIZED) {
result = GetMacosVersionInternal();
atomic_store(cache, result, memory_order_release);
}
return result;
}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
@ -172,12 +135,10 @@ void MaybeReexec() {
// Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
}
if (common_flags()->verbosity >= 1) {
Report("exec()-ing the program with\n");
Report("%s=%s\n", kDyldInsertLibraries, new_env);
Report("to enable ASan wrappers.\n");
Report("Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
}
VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable ASan wrappers.\n");
VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
} else {
// DYLD_INSERT_LIBRARIES is set and contains the runtime library.
@ -236,8 +197,15 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
// No-op. Mac does not support static linkage anyway.
void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
bool AsanInterceptsSignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) && flags()->handle_segv;
return (signum == SIGSEGV || signum == SIGBUS) &&
common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
@ -309,11 +277,10 @@ extern "C"
void asan_dispatch_call_block_and_release(void *block) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
if (common_flags()->verbosity >= 2) {
Report("asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
}
VReport(2,
"asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
@ -347,10 +314,10 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
if (common_flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \
} \
return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
asan_dispatch_call_block_and_release); \
PRINT_CURRENT_STACK(); \
} \
return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
asan_dispatch_call_block_and_release); \
}
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
@ -386,7 +353,6 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
// FIXME: consolidate these declarations with asan_intercepted_functions.h.
void dispatch_async(dispatch_queue_t dq, void(^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void(^work)(void));
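
MaybeReexec above (now routed through VReport) hinges on a simple dance: if DYLD_INSERT_LIBRARIES does not yet name the runtime dylib, set it and re-exec the process so interposition is active from the first instruction. A hedged distillation (the function name and the absent error handling are illustrative):

    #include <stdlib.h>
    #include <unistd.h>

    void ReexecWithRuntime(const char *program, char **argv, const char *dylib) {
      setenv("DYLD_INSERT_LIBRARIES", dylib, /*overwrite=*/0);
      execv(program, argv);  // does not return on success
    }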


@ -1,57 +0,0 @@
//===-- asan_mac.h ----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific ASan definitions.
//===----------------------------------------------------------------------===//
#ifndef ASAN_MAC_H
#define ASAN_MAC_H
// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
// and subject to change in further CoreFoundation versions. Apple does not
// guarantee any binary compatibility from release to release.
// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
#if defined(__BIG_ENDIAN__)
#define CF_RC_BITS 0
#endif
#if defined(__LITTLE_ENDIAN__)
#define CF_RC_BITS 3
#endif
// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
typedef struct __CFRuntimeBase {
uptr _cfisa;
u8 _cfinfo[4];
#if __LP64__
u32 _rc;
#endif
} CFRuntimeBase;
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
MACOS_VERSION_LEOPARD,
MACOS_VERSION_SNOW_LEOPARD,
MACOS_VERSION_LION,
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS
};
// Used by asan_malloc_mac.cc and asan_mac.cc
extern "C" void __CFInitialize();
namespace __asan {
MacosVersion GetMacosVersion();
void MaybeReplaceCFAllocator();
} // namespace __asan
#endif // ASAN_MAC_H


@ -13,8 +13,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
@ -74,7 +75,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@ -99,8 +100,12 @@ INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
ALIAS("memalign");
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
DTLS_on_libc_memalign(res, size * boundary);
return res;
}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_CURRENT_PC_BP_SP;
@ -146,4 +151,4 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
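
The calloc interceptor's "dlsym hack" above exists because dlsym itself calls calloc while REAL(calloc) is still being resolved; those early requests are served from a static pool. A self-contained sketch of the same bootstrap idea (bootstrap_calloc and the pool size are illustrative):

    #include <stddef.h>

    static char pool[1024];        // static storage: already zero-initialized
    static size_t pool_used = 0;

    extern "C" void *bootstrap_calloc(size_t nmemb, size_t size) {
      size_t bytes = nmemb * size;  // a real implementation must check overflow
      if (pool_used + bytes > sizeof(pool)) return 0;
      void *res = pool + pool_used;
      pool_used += bytes;
      return res;
    }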


@ -22,10 +22,10 @@
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.
@ -39,7 +39,7 @@ static malloc_zone_t asan_zone;
INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned zone_flags) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
uptr page_size = GetPageSizeCached();
uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
@ -58,34 +58,34 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
}
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
// FIXME: ASan should support purgeable allocations.
// https://code.google.com/p/address-sanitizer/issues/detail?id=139
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
return &asan_zone;
}
INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
}
INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
// FIXME: ASan should support purgeable allocations. Ignoring them is fine
// for now.
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
// Must return 0 if the contents were not purged since the last call to
// malloc_make_purgeable().
return 0;
}
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
// Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
size_t buflen = 6 + (name ? internal_strlen(name) : 0);
InternalScopedBuffer<char> new_name(buflen);
@ -100,44 +100,44 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
}
INTERCEPTOR(void *, malloc, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
void *res = asan_malloc(size, &stack);
return res;
}
INTERCEPTOR(void, free, void *ptr) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
if (!ptr) return;
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, valloc, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}
INTERCEPTOR(size_t, malloc_good_size, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
return asan_zone.introspect->good_size(&asan_zone, size);
}
INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
if (!asan_inited) __asan_init();
ENSURE_ASAN_INITED();
CHECK(memptr);
GET_STACK_TRACE_MALLOC;
void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
@ -157,7 +157,7 @@ size_t mz_size(malloc_zone_t* zone, const void* ptr) {
}
void *mz_malloc(malloc_zone_t *zone, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
@ -166,7 +166,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
}
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@ -182,7 +182,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
}
void *mz_valloc(malloc_zone_t *zone, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
}
@ -240,7 +240,7 @@ void mz_destroy(malloc_zone_t* zone) {
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
if (!asan_inited) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
}


@ -17,7 +17,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_interception.h"
#include <stddef.h>
@ -101,6 +101,21 @@ size_t _msize(void *ptr) {
return asan_malloc_usable_size(ptr, pc, bp);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_expand(void *memblock, size_t size) {
// _expand is used in realloc-like functions to resize the buffer if possible.
// We don't want memory to stay at the same address while buffers are
// resized, so return 0, which forces the caller to allocate a fresh block.
return 0;
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_expand_dbg(void *memblock, size_t size) {
return 0;
}
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
int _CrtDbgReport(int, const char*, int,
const char*, const char*, ...) {
ShowStatsAndAbort();


@ -41,54 +41,81 @@
// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
//
// Default Linux/i386 mapping:
// Default Linux/i386 mapping on x86_64 machine:
// || `[0x40000000, 0xffffffff]` || HighMem ||
// || `[0x28000000, 0x3fffffff]` || HighShadow ||
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/i386 mapping on i386 machine
// (addresses starting with 0xc0000000 are reserved
// for kernel and thus not sanitized):
// || `[0x38000000, 0xbfffffff]` || HighMem ||
// || `[0x27000000, 0x37ffffff]` || HighShadow ||
// || `[0x24000000, 0x26ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/MIPS mapping:
// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
//
// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000:
// || `[0x60000000, 0xffffffff]` || HighMem ||
// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
# define SHADOW_SCALE (__asan_mapping_scale)
# define SHADOW_OFFSET (__asan_mapping_offset)
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
#else
# define SHADOW_SCALE kDefaultShadowScale
# if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
# else
# if SANITIZER_WORDSIZE == 32
# if defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# if SANITIZER_WORDSIZE == 32
# if defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# else
# if defined(__powerpc64__)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# if SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# endif
# else
# if defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
#endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET
#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
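
The tables above all follow from this one formula. A worked example with the default x86_64 Linux parameters (SHADOW_SCALE == 3, SHADOW_OFFSET == kDefaultShort64bitShadowOffset; the application address is arbitrary):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kScale  = 3;           // one shadow byte per 8 app bytes
      const uint64_t kOffset = 0x7FFF8000;  // kDefaultShort64bitShadowOffset
      uint64_t mem = 0x601040;              // arbitrary application address
      uint64_t shadow = (mem >> kScale) + kOffset;  // MEM_TO_SHADOW(mem)
      // (0x601040 >> 3) + 0x7FFF8000 == 0xC0208 + 0x7FFF8000 == 0x800B8208
      printf("shadow(%#llx) = %#llx\n",
             (unsigned long long)mem, (unsigned long long)shadow);
      return 0;
    }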


@ -14,20 +14,14 @@
#include "asan_internal.h"
#include "asan_stack.h"
#include <stddef.h>
#include "sanitizer_common/sanitizer_interception.h"
namespace __asan {
// This function is a no-op. We need it to make sure that the object file
// with our replacements is actually loaded from the static ASan
// run-time library at link time.
void ReplaceOperatorsNewAndDelete() { }
}
#include <stddef.h>
using namespace __asan; // NOLINT
// On Android new() goes through malloc interceptors.
// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
#if !SANITIZER_ANDROID
// This code has issues on OSX.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
@ -46,6 +40,15 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
// http://svnweb.freebsd.org/base?view=revision&revision=232261
#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
#include <sys/param.h>
#if __FreeBSD_version <= 902001 // v9.2
#define size_t unsigned
#endif // __FreeBSD_version
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
@ -78,15 +81,21 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
void operator delete(void *ptr) throw() {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
void operator delete[](void *ptr) throw() {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
void operator delete(void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
@ -102,5 +111,3 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
#endif
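
The throw() change above matters because a replacement deallocation function must match the exception specification that <new> declares in pre-C++11 modes, or the build breaks. A minimal pair of global overrides showing the shape (deliberately simplified and non-conforming: no bad_alloc on failure; throw() is the pre-C++20 spelling this era of the code targets):

    #include <cstdlib>
    #include <new>

    void *operator new(std::size_t size) { return std::malloc(size); }
    void operator delete(void *ptr) throw() { std::free(ptr); }  // note throw()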


@ -11,6 +11,8 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
@ -48,6 +50,15 @@ struct ShadowSegmentEndpoint {
}
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@ -67,10 +78,8 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
if (common_flags()->verbosity >= 1) {
Printf("Trying to poison memory region [%p, %p)\n",
(void*)beg_addr, (void*)end_addr);
}
VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@ -109,10 +118,8 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size;
if (common_flags()->verbosity >= 1) {
Printf("Trying to unpoison memory region [%p, %p)\n",
(void*)beg_addr, (void*)end_addr);
}
VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
@ -137,7 +144,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
}
bool __asan_address_is_poisoned(void const volatile *addr) {
int __asan_address_is_poisoned(void const volatile *addr) {
return __asan::AddressIsPoisoned((uptr)addr);
}
@ -146,6 +153,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr end = beg + size;
if (!AddrIsInMem(beg)) return beg;
if (!AddrIsInMem(end)) return end;
CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
@ -243,14 +251,12 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
if (common_flags()->verbosity > 0)
Report("poisoning: %p %zx\n", (void*)addr, size);
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
}
void __asan_unpoison_stack_memory(uptr addr, uptr size) {
if (common_flags()->verbosity > 0)
Report("unpoisoning: %p %zx\n", (void*)addr, size);
VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, false);
}
@ -258,33 +264,40 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
if (common_flags()->verbosity >= 2)
Printf("contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
if (!flags()->detect_container_overflow) return;
VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
uptr beg = reinterpret_cast<uptr>(beg_p);
uptr end= reinterpret_cast<uptr>(end_p);
uptr end = reinterpret_cast<uptr>(end_p);
uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
uptr granularity = SHADOW_GRANULARITY;
CHECK(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
IsAligned(beg, granularity));
if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
IsAligned(beg, granularity))) {
GET_STACK_TRACE_FATAL_HERE;
ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
&stack);
}
CHECK_LE(end - beg,
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
uptr d1 = RoundDownTo(old_mid, granularity);
uptr d2 = RoundUpTo(old_mid, granularity);
// uptr d2 = RoundUpTo(old_mid, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
// Make a quick sanity check that we are indeed in this state.
if (d1 != d2)
CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
//
// FIXME: Two of these three checks are disabled until we fix
// https://code.google.com/p/address-sanitizer/issues/detail?id=258.
// if (d1 != d2)
// CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
CHECK_EQ(*(u8*)MemToShadow(a), 0);
if (d2 + granularity <= c && c <= end)
CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
kAsanContiguousContainerOOBMagic);
// if (d2 + granularity <= c && c <= end)
// CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
// kAsanContiguousContainerOOBMagic);
uptr b1 = RoundDownTo(new_mid, granularity);
uptr b2 = RoundUpTo(new_mid, granularity);
@ -297,3 +310,42 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
*(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
}
}
int __sanitizer_verify_contiguous_container(const void *beg_p,
const void *mid_p,
const void *end_p) {
if (!flags()->detect_container_overflow) return 1;
uptr beg = reinterpret_cast<uptr>(beg_p);
uptr end = reinterpret_cast<uptr>(end_p);
uptr mid = reinterpret_cast<uptr>(mid_p);
CHECK_LE(beg, mid);
CHECK_LE(mid, end);
// Check some bytes starting from beg, some bytes around mid, and some bytes
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
uptr r1_end = Min(end + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
uptr r3_end = end;
for (uptr i = r1_beg; i < r1_end; i++)
if (AddressIsPoisoned(i))
return 0;
for (uptr i = r2_beg; i < mid; i++)
if (AddressIsPoisoned(i))
return 0;
for (uptr i = mid; i < r2_end; i++)
if (!AddressIsPoisoned(i))
return 0;
for (uptr i = r3_beg; i < r3_end; i++)
if (!AddressIsPoisoned(i))
return 0;
return 1;
}
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}
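
For context, this is how a vector-like container is expected to drive the annotation API that __sanitizer_verify_contiguous_container checks: on every size change it re-annotates so the [size, capacity) tail stays poisoned. A usage sketch (AnnotateResize is a hypothetical helper; the interface header ships with the sanitizer runtimes):

    #include <stddef.h>
    #include <sanitizer/common_interface_defs.h>

    void AnnotateResize(char *data, size_t capacity,
                        size_t old_size, size_t new_size) {
      __sanitizer_annotate_contiguous_container(
          data, data + capacity,   // [beg, end): the whole allocation
          data + old_size,         // old_mid: previous end of live elements
          data + new_size);        // new_mid: bytes past it become poisoned
    }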


@ -13,6 +13,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@ -32,10 +33,35 @@ void PoisonShadowPartialRightRedzone(uptr addr,
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
// FIXME: Page states are different on Windows, so using the same interface
// for mapping shadow and zeroing out pages doesn't "just work", so we should
// probably provide a higher-level interface for these operations.
// For now, just memset on Windows.
if (value ||
SANITIZER_WINDOWS == 1 ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {
uptr page_beg = RoundUpTo(shadow_beg, PageSize);
uptr page_end = RoundDownTo(shadow_end, PageSize);
if (page_beg >= page_end) {
REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
} else {
if (page_beg != shadow_beg) {
REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
}
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
CHECK_EQ(page_beg, res);
}
}
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
@ -55,4 +81,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
// Calls __sanitizer::FlushUnneededShadowMemory() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan
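
The zero-fill fast path in FastPoisonShadow above memsets only the unaligned edges and hands the page-aligned middle to MmapFixedNoReserve. The rounding arithmetic, worked on a small example with an assumed 4096-byte page:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kPage = 4096;
      uint64_t beg = 0x1100, end = 0x9f00;  // shadow range to clear (example)
      uint64_t page_beg = (beg + kPage - 1) & ~(kPage - 1);  // RoundUpTo   -> 0x2000
      uint64_t page_end = end & ~(kPage - 1);                // RoundDownTo -> 0x9000
      printf("memset [%#llx,%#llx) and [%#llx,%#llx); remap [%#llx,%#llx)\n",
             (unsigned long long)beg, (unsigned long long)page_beg,
             (unsigned long long)page_end, (unsigned long long)end,
             (unsigned long long)page_beg, (unsigned long long)page_end);
      return 0;
    }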


@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#if SANITIZER_POSIX
#include "asan_internal.h"
#include "asan_interceptors.h"
@ -28,70 +28,27 @@
#include <sys/resource.h>
#include <unistd.h>
static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
namespace __asan {
static void MaybeInstallSigaction(int signum,
void (*handler)(int, siginfo_t *, void *)) {
if (!AsanInterceptsSignal(signum))
return;
struct sigaction sigact;
REAL(memset)(&sigact, 0, sizeof(sigact));
sigact.sa_sigaction = handler;
sigact.sa_flags = SA_SIGINFO;
if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
if (common_flags()->verbosity >= 1) {
Report("Installed the sigaction for signal %d\n", signum);
}
}
static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
uptr addr = (uptr)siginfo->si_addr;
void AsanOnSIGSEGV(int, void *siginfo, void *context) {
uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
uptr pc, sp, bp;
GetPcSpBp(context, &pc, &sp, &bp);
ReportSIGSEGV(pc, sp, bp, addr);
}
void SetAlternateSignalStack() {
stack_t altstack, oldstack;
CHECK_EQ(0, sigaltstack(0, &oldstack));
// If the alternate stack is already in place, do nothing.
if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
// TODO(glider): the mapped stack should have the MAP_STACK flag in the
// future. It is not required by man 2 sigaltstack now (they're using
// malloc()).
void* base = MmapOrDie(kAltStackSize, __FUNCTION__);
altstack.ss_sp = base;
altstack.ss_flags = 0;
altstack.ss_size = kAltStackSize;
CHECK_EQ(0, sigaltstack(&altstack, 0));
if (common_flags()->verbosity > 0) {
Report("Alternative stack for T%d set: [%p,%p)\n",
GetCurrentTidOrInvalid(),
altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
}
}
void UnsetAlternateSignalStack() {
stack_t altstack, oldstack;
altstack.ss_sp = 0;
altstack.ss_flags = SS_DISABLE;
altstack.ss_size = 0;
CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
void InstallSignalHandlers() {
// Set the alternate signal stack for the main thread.
// This will cause SetAlternateSignalStack to be called twice, but the stack
// will be actually set only once.
if (flags()->use_sigaltstack) SetAlternateSignalStack();
MaybeInstallSigaction(SIGSEGV, ASAN_OnSIGSEGV);
MaybeInstallSigaction(SIGBUS, ASAN_OnSIGSEGV);
// Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 redzone, ARM push of multiple registers, etc) is probably a
// stack overflow.
// We also check si_code to filter out a SEGV caused by something other
// than hitting the guard page or unmapped memory, such as an
// unaligned memory access.
if (addr + 128 > sp && addr < sp + 0xFFFF &&
(code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(pc, sp, bp, context, addr);
else
ReportSIGSEGV(pc, sp, bp, context, addr);
}
// ---------------------- TSD ---------------- {{{1
@ -125,4 +82,4 @@ void PlatformTSDDtor(void *tsd) {
}
} // namespace __asan
#endif // SANITIZER_LINUX || SANITIZER_MAC
#endif // SANITIZER_POSIX
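
The rewritten AsanOnSIGSEGV distinguishes stack overflows from other SEGVs using the address/si_code heuristic described in the comment above. The predicate, pulled out as a standalone sketch (ProbablyStackOverflow is a made-up name; the runtime uses its own si_SEGV_* constants rather than the <signal.h> ones shown here):

    #include <signal.h>
    #include <stdint.h>

    static bool ProbablyStackOverflow(uintptr_t addr, uintptr_t sp, int si_code) {
      // Faulting address just above SP, or slightly below it (redzone,
      // multi-register push), and a plain mapping/permission fault.
      bool near_sp = addr + 128 > sp && addr < sp + 0xFFFF;
      bool plain_fault = (si_code == SEGV_MAPERR || si_code == SEGV_ACCERR);
      return near_sp && plain_fault;
    }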


@ -43,11 +43,9 @@ void AppendToErrorMessageBuffer(const char *buffer) {
}
// ---------------------- Decorator ------------------------------ {{{1
class Decorator: private __sanitizer::AnsiColorDecorator {
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
const char *Warning() { return Red(); }
const char *EndWarning() { return Default(); }
Decorator() : SanitizerCommonDecorator() { }
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
@ -89,68 +87,77 @@ class Decorator: private __sanitizer::AnsiColorDecorator {
// ---------------------- Helper functions ----------------------- {{{1
static void PrintShadowByte(const char *before, u8 byte,
const char *after = "\n") {
static void PrintShadowByte(InternalScopedString *str, const char *before,
u8 byte, const char *after = "\n") {
Decorator d;
Printf("%s%s%x%x%s%s", before,
d.ShadowByte(byte), byte >> 4, byte & 15, d.EndShadowByte(), after);
str->append("%s%s%x%x%s%s", before, d.ShadowByte(byte), byte >> 4, byte & 15,
d.EndShadowByte(), after);
}
static void PrintShadowBytes(const char *before, u8 *bytes,
u8 *guilty, uptr n) {
static void PrintShadowBytes(InternalScopedString *str, const char *before,
u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
if (before)
Printf("%s%p:", before, bytes);
if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before = p == guilty ? "[" :
(p - 1 == guilty && i != 0) ? "" : " ";
const char *before =
p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
PrintShadowByte(before, *p, after);
PrintShadowByte(str, before, *p, after);
}
Printf("\n");
str->append("\n");
}
static void PrintLegend() {
Printf("Shadow byte legend (one shadow byte represents %d "
"application bytes):\n", (int)SHADOW_GRANULARITY);
PrintShadowByte(" Addressable: ", 0);
Printf(" Partially addressable: ");
for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
PrintShadowByte("", i, " ");
Printf("\n");
PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
PrintShadowByte(" Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
PrintShadowByte(" Stack after return: ", kAsanStackAfterReturnMagic);
PrintShadowByte(" Stack use after scope: ", kAsanStackUseAfterScopeMagic);
PrintShadowByte(" Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(" Global init order: ", kAsanInitializationOrderMagic);
PrintShadowByte(" Poisoned by user: ", kAsanUserPoisonedMemoryMagic);
PrintShadowByte(" Contiguous container OOB:",
static void PrintLegend(InternalScopedString *str) {
str->append(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
(int)SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
str->append(" Partially addressable: ");
for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
str->append("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
PrintShadowByte(str, " Heap right redzone: ",
kAsanHeapRightRedzoneMagic);
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(str, " Stack left redzone: ",
kAsanStackLeftRedzoneMagic);
PrintShadowByte(str, " Stack mid redzone: ",
kAsanStackMidRedzoneMagic);
PrintShadowByte(str, " Stack right redzone: ",
kAsanStackRightRedzoneMagic);
PrintShadowByte(str, " Stack partial redzone: ",
kAsanStackPartialRedzoneMagic);
PrintShadowByte(str, " Stack after return: ",
kAsanStackAfterReturnMagic);
PrintShadowByte(str, " Stack use after scope: ",
kAsanStackUseAfterScopeMagic);
PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(str, " Global init order: ",
kAsanInitializationOrderMagic);
PrintShadowByte(str, " Poisoned by user: ",
kAsanUserPoisonedMemoryMagic);
PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
}
static void PrintShadowMemoryForAddress(uptr addr) {
if (!AddrIsInMem(addr))
return;
if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
Printf("Shadow bytes around the buggy address:\n");
InternalScopedString str(4096 * 8);
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
PrintShadowBytes(prefix,
(u8*)(aligned_shadow + i * n_bytes_per_row),
(u8*)shadow_addr, n_bytes_per_row);
PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
(u8 *)shadow_addr, n_bytes_per_row);
}
if (flags()->print_legend)
PrintLegend();
if (flags()->print_legend) PrintLegend(&str);
Printf("%s", str.data());
}
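
The InternalScopedString rewrite threading through this file buffers a whole report and emits it with a single Printf, so interleaved output from concurrent reports cannot tear it apart. The pattern in miniature (ScopedReport is illustrative, not the runtime's class):

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    struct ScopedReport {
      std::string buf;
      void append(const char *fmt, ...) {
        char tmp[512];
        va_list ap;
        va_start(ap, fmt);
        vsnprintf(tmp, sizeof(tmp), fmt, ap);
        va_end(ap);
        buf += tmp;
      }
      ~ScopedReport() { fputs(buf.c_str(), stderr); }  // one write per report
    };

    int main() {
      ScopedReport r;
      r.append("line %d\n", 1);
      r.append("line %d\n", 2);  // both lines reach stderr together at scope exit
      return 0;
    }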
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
@ -182,20 +189,25 @@ static bool IsASCII(unsigned char c) {
static const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use a heuristic
// approach to check whether the name should be demangled.
return (name[0] == '_' && name[1] == 'Z')
? Symbolizer::Get()->Demangle(name)
: name;
bool should_demangle = false;
if (name[0] == '_' && name[1] == 'Z')
should_demangle = true;
else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
should_demangle = true;
return should_demangle ? Symbolizer::Get()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
static void PrintGlobalNameIfASCII(const __asan_global &g) {
static void PrintGlobalNameIfASCII(InternalScopedString *str,
const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char*)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char*)(g.beg + g.size - 1) != '\0') return;
Printf(" '%s' is ascii string '%s'\n",
MaybeDemangleGlobalName(g.name), (char*)g.beg);
str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
(char *)g.beg);
}
bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
@ -203,23 +215,26 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
static const uptr kMinimalDistanceFromAnotherGlobal = 64;
if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
if (addr >= g.beg + g.size_with_redzone) return false;
InternalScopedString str(4096);
Decorator d;
Printf("%s", d.Location());
str.append("%s", d.Location());
if (addr < g.beg) {
Printf("%p is located %zd bytes to the left", (void*)addr, g.beg - addr);
str.append("%p is located %zd bytes to the left", (void *)addr,
g.beg - addr);
} else if (addr + size > g.beg + g.size) {
if (addr < g.beg + g.size)
addr = g.beg + g.size;
Printf("%p is located %zd bytes to the right", (void*)addr,
addr - (g.beg + g.size));
str.append("%p is located %zd bytes to the right", (void *)addr,
addr - (g.beg + g.size));
} else {
// Can it happen?
Printf("%p is located %zd bytes inside", (void*)addr, addr - g.beg);
str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
}
Printf(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
str.append(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size);
Printf("%s", d.EndLocation());
PrintGlobalNameIfASCII(g);
str.append("%s", d.EndLocation());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
return true;
}
@ -288,16 +303,18 @@ void PrintAccessAndVarIntersection(const char *var_name,
addr - prev_var_end >= var_beg - addr_end)
pos_descr = "underflows";
}
Printf(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
InternalScopedString str(1024);
str.append(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
Printf("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.EndLocation());
str.append("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.EndLocation());
} else {
Printf("\n");
str.append("\n");
}
Printf("%s", str.data());
}
struct StackVarDescr {
@ -346,7 +363,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
alloca_stack.trace[0] = frame_pc + 16;
alloca_stack.size = 1;
Printf("%s", d.EndLocation());
PrintStack(&alloca_stack);
alloca_stack.Print();
// Report the number of stack objects.
char *p;
uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
@ -394,24 +411,26 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
uptr access_size) {
sptr offset;
Decorator d;
Printf("%s", d.Location());
InternalScopedString str(4096);
str.append("%s", d.Location());
if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
Printf("%p is located %zd bytes to the left of", (void*)addr, offset);
str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
} else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
if (offset < 0) {
addr -= offset;
offset = 0;
}
Printf("%p is located %zd bytes to the right of", (void*)addr, offset);
str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
} else if (chunk.AddrIsInside(addr, access_size, &offset)) {
Printf("%p is located %zd bytes inside of", (void*)addr, offset);
str.append("%p is located %zd bytes inside of", (void*)addr, offset);
} else {
Printf("%p is located somewhere around (this is AddressSanitizer bug!)",
(void*)addr);
str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
(void *)addr);
}
Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
(void*)(chunk.Beg()), (void*)(chunk.End()));
Printf("%s", d.EndLocation());
str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
(void *)(chunk.Beg()), (void *)(chunk.End()));
str.append("%s", d.EndLocation());
Printf("%s", str.data());
}
void DescribeHeapAddress(uptr addr, uptr access_size) {
@ -439,7 +458,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
d.EndAllocation());
StackTrace free_stack;
chunk.GetFreeStack(&free_stack);
PrintStack(&free_stack);
free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n",
d.Allocation(), alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
@ -450,7 +469,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
}
PrintStack(&alloc_stack);
alloc_stack.Print();
DescribeThread(GetCurrentThread());
if (free_thread)
DescribeThread(free_thread);
@ -481,15 +500,16 @@ void DescribeThread(AsanThreadContext *context) {
}
context->announced = true;
char tname[128];
Printf("Thread T%d%s", context->tid,
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
Printf(" created by T%d%s here:\n",
context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid,
tname, sizeof(tname)));
InternalScopedString str(1024);
str.append("Thread T%d%s", context->tid,
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
str.append(
" created by T%d%s here:\n", context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
Printf("%s", str.data());
uptr stack_size;
const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size);
PrintStack(stack_trace, stack_size);
StackTrace::PrintStack(stack_trace, stack_size);
// Recursively described parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
@ -539,6 +559,8 @@ class ScopedInErrorReport {
NORETURN ~ScopedInErrorReport() {
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
// We may want to grab this lock again when printing stats.
asanThreadRegistry().Unlock();
// Print memory stats.
if (flags()->print_stats)
__asan_print_accumulated_stats();
@ -550,17 +572,33 @@ class ScopedInErrorReport {
}
};
void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
" (pc %p sp %p bp %p T%d)\n",
(void*)addr, (void*)pc, (void*)sp, (void*)bp,
GetCurrentTidOrInvalid());
Report(
"ERROR: AddressSanitizer: stack-overflow on address %p"
" (pc %p sp %p bp %p T%d)\n",
(void *)addr, (void *)pc, (void *)sp, (void *)bp,
GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
GET_STACK_TRACE_SIGNAL(pc, bp, context);
stack.Print();
ReportErrorSummary("stack-overflow", &stack);
}
void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: SEGV on unknown address %p"
" (pc %p sp %p bp %p T%d)\n",
(void *)addr, (void *)pc, (void *)sp, (void *)bp,
GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
GET_STACK_TRACE_SIGNAL(pc, bp, context);
stack.Print();
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary("SEGV", &stack);
}
@ -578,7 +616,7 @@ void ReportDoubleFree(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
PrintStack(&stack);
stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("double-free", &stack);
}
@ -595,7 +633,7 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
PrintStack(&stack);
stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-free", &stack);
}
@ -616,7 +654,7 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
PrintStack(&stack);
stack.Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("alloc-dealloc-mismatch", &stack);
Report("HINT: if you don't care about these warnings you may set "
@ -631,7 +669,7 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
"malloc_usable_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
PrintStack(stack);
stack->Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-malloc_usable_size", stack);
}
@ -644,7 +682,7 @@ void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
"__asan_get_allocated_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
PrintStack(stack);
stack->Print();
DescribeHeapAddress(addr, 1);
ReportErrorSummary("bad-__asan_get_allocated_size", stack);
}
@ -661,12 +699,81 @@ void ReportStringFunctionMemoryRangesOverlap(
"memory ranges [%p,%p) and [%p, %p) overlap\n", \
bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
Printf("%s", d.EndWarning());
PrintStack(stack);
stack->Print();
DescribeAddress((uptr)offset1, length1);
DescribeAddress((uptr)offset2, length2);
ReportErrorSummary(bug_type, stack);
}
void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
StackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
const char *bug_type = "negative-size-param";
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
Printf("%s", d.EndWarning());
stack->Print();
DescribeAddress(offset, size);
ReportErrorSummary(bug_type, stack);
}
void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
StackTrace *stack) {
ScopedInErrorReport in_report;
Report("ERROR: AddressSanitizer: bad parameters to "
"__sanitizer_annotate_contiguous_container:\n"
" beg : %p\n"
" end : %p\n"
" old_mid : %p\n"
" new_mid : %p\n",
beg, end, old_mid, new_mid);
stack->Print();
ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
}
void ReportODRViolation(const __asan_global *g1, const __asan_global *g2) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
Printf("%s", d.EndWarning());
Printf(" [1] size=%zd %s %s\n", g1->size, g1->name, g1->module_name);
Printf(" [2] size=%zd %s %s\n", g2->size, g2->name, g2->module_name);
Report("HINT: if you don't care about these warnings you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
ReportErrorSummary("odr-violation", g1->module_name, 0, g1->name);
}
// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void
ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
Printf("%s", d.EndWarning());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
DescribeAddress(a1, 1);
DescribeAddress(a2, 1);
ReportErrorSummary("invalid-pointer-pair", &stack);
}
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
if (!flags()->detect_invalid_pointer_pairs) return;
uptr a1 = reinterpret_cast<uptr>(p1);
uptr a2 = reinterpret_cast<uptr>(p2);
AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
bool valid1 = chunk1.IsValid();
bool valid2 = chunk2.IsValid();
if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) {
GET_CALLER_PC_BP_SP;
return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
}
}
// ----------------------- Mac-specific reports ----------------- {{{1
void WarnMacFreeUnallocated(
@ -676,7 +783,7 @@ void WarnMacFreeUnallocated(
"AddressSanitizer is ignoring this error on Mac OS now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
PrintStack(stack);
stack->Print();
DescribeHeapAddress(addr, 1);
}
@ -687,7 +794,7 @@ void ReportMacMzReallocUnknown(
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
PrintStack(stack);
stack->Print();
DescribeHeapAddress(addr, 1);
}
@ -698,7 +805,7 @@ void ReportMacCfReallocUnknown(
"This is an unrecoverable problem, exiting now.\n",
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
PrintStack(stack);
stack->Print();
DescribeHeapAddress(addr, 1);
}
@ -707,8 +814,8 @@ void ReportMacCfReallocUnknown(
// --------------------------- Interface --------------------- {{{1
using namespace __asan; // NOLINT
void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, bool is_write, uptr access_size) {
void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
uptr access_size) {
ScopedInErrorReport in_report;
// Determine the error type.
@ -774,7 +881,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
d.EndAccess());
GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
stack.Print();
DescribeAddress(addr, access_size);
ReportErrorSummary(bug_descr, &stack);
@ -786,7 +893,7 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
if (callback) {
error_message_buffer_size = 1 << 16;
error_message_buffer =
(char*)MmapOrDie(error_message_buffer_size, __FUNCTION__);
(char*)MmapOrDie(error_message_buffer_size, __func__);
error_message_buffer_pos = 0;
}
}
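
For context, a minimal (hypothetical) consumer of the callback installed above; __asan_set_error_report_callback is the public entry point declared in sanitizer/asan_interface.h, and the string handed to the callback is the accumulated report text. A sketch, not part of this commit:

#include <sanitizer/asan_interface.h>
#include <stdio.h>
#include <string.h>

// Forward each completed ASan report to a custom sink (illustration only).
static void OnAsanReport(const char *report) {
  fprintf(stderr, "captured %zu bytes of ASan report text\n", strlen(report));
}

__attribute__((constructor)) static void InstallReportHook() {
  __asan_set_error_report_callback(OnAsanReport);
}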
@ -795,6 +902,17 @@ void __asan_describe_address(uptr addr) {
DescribeAddress(addr, 1);
}
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
CheckForInvalidPointerPair(a, b);
}
} // extern "C"
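
A sketch of when these entry points fire: the compiler emits the calls for instrumented pointer comparisons and subtractions, so invoking one by hand, as below, only illustrates the contract of CheckForInvalidPointerPair above (a report also requires detect_invalid_pointer_pairs >= 1 at run time):

extern "C" void __sanitizer_ptr_cmp(void *a, void *b);

int main() {
  char *p = new char[8];
  char *q = new char[8];
  // p and q point into different heap chunks, so with
  // ASAN_OPTIONS=detect_invalid_pointer_pairs=1 this triggers
  // an invalid-pointer-pair report.
  __sanitizer_ptr_cmp(p, q);
  delete[] p;
  delete[] q;
}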
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
// and may be overridden by the user.

@ -30,7 +30,10 @@ void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
void NORETURN
ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
void NORETURN
ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
@ -43,6 +46,14 @@ void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
void NORETURN ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack);
void NORETURN
ReportStringFunctionSizeOverflow(uptr offset, uptr size, StackTrace *stack);
void NORETURN
ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid,
uptr new_mid, StackTrace *stack);
void NORETURN
ReportODRViolation(const __asan_global *g1, const __asan_global *g2);
// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(

@ -9,6 +9,7 @@
//
// Main file of the ASan run-time library.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface_internal.h"
@ -26,6 +27,7 @@
#include "lsan/lsan_common.h"
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
namespace __asan {
@ -49,7 +51,7 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
}
}
if (flags()->coverage)
if (common_flags()->coverage)
__sanitizer_cov_dump();
if (death_callback)
death_callback();
@ -60,8 +62,8 @@ static void AsanDie() {
static void AsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
line, cond, (uptr)v1, (uptr)v2);
// FIXME: check for infinite recursion without a thread-local counter here.
PRINT_CURRENT_STACK();
Die();
@ -76,7 +78,7 @@ static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
@ -91,56 +93,154 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
ParseFlag(str, &f->quarantine_size, "quarantine_size");
ParseFlag(str, &f->redzone, "redzone");
// Please write meaningful flag descriptions when adding new flags.
ParseFlag(str, &f->quarantine_size, "quarantine_size",
"Size (in bytes) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.");
ParseFlag(str, &f->redzone, "redzone",
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.");
ParseFlag(str, &f->max_redzone, "max_redzone",
"Maximal size (in bytes) of redzones around heap objects.");
CHECK_GE(f->redzone, 16);
CHECK_GE(f->max_redzone, f->redzone);
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
ParseFlag(str, &f->debug, "debug");
ParseFlag(str, &f->report_globals, "report_globals");
ParseFlag(str, &f->check_initialization_order, "check_initialization_order");
ParseFlag(str, &f->debug, "debug",
"If set, prints some debugging information and does additional checks.");
ParseFlag(str, &f->report_globals, "report_globals",
"Controls the way to handle globals (0 - don't detect buffer overflow on "
"globals, 1 - detect buffer overflow, 2 - print data about registered "
"globals).");
ParseFlag(str, &f->replace_str, "replace_str");
ParseFlag(str, &f->replace_intrin, "replace_intrin");
ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free");
ParseFlag(str, &f->check_initialization_order,
"check_initialization_order",
"If set, attempts to catch initialization order issues.");
ParseFlag(str, &f->replace_str, "replace_str",
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.");
ParseFlag(str, &f->replace_intrin, "replace_intrin",
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.");
ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
"Ignore invalid free() calls to work around some bugs. Used on OS X "
"only.");
ParseFlag(str, &f->detect_stack_use_after_return,
"detect_stack_use_after_return");
ParseFlag(str, &f->uar_stack_size_log, "uar_stack_size_log");
ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size");
ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte");
ParseFlag(str, &f->exitcode, "exitcode");
ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning");
ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying");
ParseFlag(str, &f->handle_segv, "handle_segv");
ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler");
ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size");
ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit");
ParseFlag(str, &f->abort_on_error, "abort_on_error");
ParseFlag(str, &f->print_stats, "print_stats");
ParseFlag(str, &f->print_legend, "print_legend");
ParseFlag(str, &f->atexit, "atexit");
ParseFlag(str, &f->coverage, "coverage");
ParseFlag(str, &f->disable_core, "disable_core");
ParseFlag(str, &f->allow_reexec, "allow_reexec");
ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
ParseFlag(str, &f->poison_heap, "poison_heap");
ParseFlag(str, &f->poison_partial, "poison_partial");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
ParseFlag(str, &f->strict_init_order, "strict_init_order");
"detect_stack_use_after_return",
"Enables stack-use-after-return checking at run-time.");
ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
"Minimum fake stack size log.");
ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
"Maximum fake stack size log.");
ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
"Use mmap with 'norserve' flag to allocate fake stack.");
ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.");
ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
"Value used to fill the newly allocated memory.");
ParseFlag(str, &f->exitcode, "exitcode",
"Override the program exit status if the tool found an error.");
ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.");
ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).");
ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.");
ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
"If set, explicitly unmaps the (huge) shadow at exit.");
ParseFlag(str, &f->abort_on_error, "abort_on_error",
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.");
ParseFlag(str, &f->print_stats, "print_stats",
"Print various statistics after printing an error message or if "
"atexit=1.");
ParseFlag(str, &f->print_legend, "print_legend",
"Print the legend for the shadow bytes.");
ParseFlag(str, &f->atexit, "atexit",
"If set, prints ASan exit stats even after program terminates "
"successfully.");
ParseFlag(str, &f->disable_core, "disable_core",
"Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
"dumping a 16T+ core file. "
"Ignored on OSes that don't dump core by default.");
ParseFlag(str, &f->allow_reexec, "allow_reexec",
"Allow the tool to re-exec the program. This may interfere badly with "
"the debugger.");
ParseFlag(str, &f->print_full_thread_history,
"print_full_thread_history",
"If set, prints thread creation stacks for the threads involved in the "
"report and their ancestors up to the main thread.");
ParseFlag(str, &f->poison_heap, "poison_heap",
"Poison (or not) the heap memory on [de]allocation. Zero value is useful "
"for benchmarking the allocator or instrumentator.");
ParseFlag(str, &f->poison_partial, "poison_partial",
"If true, poison partially addressable 8-byte aligned words "
"(default=true). This flag affects heap and global buffers, but not "
"stack buffers.");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
"Report errors on malloc/delete, new/free, new/delete[], etc.");
ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.");
ParseFlag(str, &f->strict_init_order, "strict_init_order",
"If true, assume that dynamic initializers can never access globals from "
"other modules, even if the latter are already initialized.");
ParseFlag(str, &f->start_deactivated, "start_deactivated",
"If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
"poisoning) to reduce memory consumption as much as possible, and "
"restores them to original values when the first instrumented module is "
"loaded into the process. This is mainly intended to be used on "
"Android. ");
ParseFlag(str, &f->detect_invalid_pointer_pairs,
"detect_invalid_pointer_pairs",
"If non-zero, try to detect operations like <, <=, >, >= and - on "
"invalid pointer pairs (e.g. when pointers belong to different objects). "
"The bigger the value the harder we try.");
ParseFlag(str, &f->detect_container_overflow,
"detect_container_overflow",
"If true, honor the container overflow annotations. "
"See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes");
}
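
Any of the flags documented above can also be baked into a binary through the __asan_default_options hook that MaybeCallAsanDefaultOptions() queries earlier in this file. A minimal sketch (flag values are arbitrary examples):

// Application-provided defaults, parsed before ASAN_OPTIONS overrides them.
extern "C" const char *__asan_default_options() {
  return "redzone=32:max_redzone=1024:detect_odr_violation=2";
}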
void InitializeFlags(Flags *f, const char *env) {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
cf->detect_leaks = CAN_SANITIZE_LEAKS;
cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf->malloc_context_size = kDefaultMallocContextSize;
cf->intercept_tls_get_addr = true;
cf->coverage = false;
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->redzone = 16;
f->max_redzone = 2048;
f->debug = false;
f->report_globals = 1;
f->check_initialization_order = false;
@ -148,53 +248,55 @@ void InitializeFlags(Flags *f, const char *env) {
f->replace_intrin = true;
f->mac_ignore_invalid_free = false;
f->detect_stack_use_after_return = false; // Also needs the compiler flag.
f->uar_stack_size_log = 0;
f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
f->uar_noreserve = false;
f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
f->malloc_fill_byte = 0xbe;
f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
f->allow_user_poisoning = true;
f->sleep_before_dying = 0;
f->handle_segv = ASAN_NEEDS_SEGV;
f->allow_user_segv_handler = false;
f->use_sigaltstack = false;
f->check_malloc_usable_size = true;
f->unmap_shadow_on_exit = false;
f->abort_on_error = false;
f->print_stats = false;
f->print_legend = true;
f->atexit = false;
f->coverage = false;
f->disable_core = (SANITIZER_WORDSIZE == 64);
f->allow_reexec = true;
f->print_full_thread_history = true;
f->poison_heap = true;
f->poison_partial = true;
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
f->strict_memcmp = true;
f->strict_init_order = false;
f->start_deactivated = false;
f->detect_invalid_pointer_pairs = 0;
f->detect_container_overflow = true;
// Override from compile definition.
ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefiniton());
ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
if (cf->verbosity) {
Report("Using the defaults from __asan_default_options: %s\n",
MaybeCallAsanDefaultOptions());
}
VReport(1, "Using the defaults from __asan_default_options: %s\n",
MaybeCallAsanDefaultOptions());
// Override from command line.
ParseFlagsFromString(f, env);
if (common_flags()->help) {
PrintFlagDescriptions();
}
#if !CAN_SANITIZE_LEAKS
if (cf->detect_leaks) {
if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
cf->detect_leaks = false;
}
#endif
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
@ -203,6 +305,17 @@ void InitializeFlags(Flags *f, const char *env) {
}
}
// Parse flags that may change between startup and activation.
// On Android they come from a system property.
// On other platforms this is no-op.
void ParseExtraActivationFlags() {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
ParseFlagsFromString(flags(), buf);
if (buf[0] != '\0')
VReport(1, "Extra activation flags: %s\n", buf);
}
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
@ -224,6 +337,7 @@ static void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size);
if (res != (void*)beg) {
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
@ -269,6 +383,53 @@ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true)
#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \
void __asan_##type##size(uptr addr) { \
uptr sp = MEM_TO_SHADOW(addr); \
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
: *reinterpret_cast<u16 *>(sp); \
if (UNLIKELY(s)) { \
if (UNLIKELY(size >= SHADOW_GRANULARITY || \
((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
(s8)s)) { \
if (__asan_test_only_reported_buggy_pointer) { \
*__asan_test_only_reported_buggy_pointer = addr; \
} else { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \
} \
} \
} \
}
ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2)
ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4)
ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8)
ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
extern "C"
NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, false, size);
}
}
extern "C"
NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size);
}
}
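
For readers following ASAN_MEMORY_ACCESS_CALLBACK above, here is a standalone model of its shadow check, assuming the usual SHADOW_GRANULARITY == 8 mapping; kShadowOffset below is an arbitrary stand-in for illustration, not the runtime's actual MEM_TO_SHADOW constant:

#include <cstdint>

constexpr uintptr_t kShadowOffset = 0x7fff8000;  // assumption, not the real offset
constexpr uintptr_t kGranularity = 8;

inline uint8_t *MemToShadow(uintptr_t addr) {
  return reinterpret_cast<uint8_t *>((addr >> 3) + kShadowOffset);
}

// Would an access of `size` bytes (size <= 8) at `addr` be reported?
inline bool AccessIsPoisoned(uintptr_t addr, uintptr_t size) {
  int8_t s = static_cast<int8_t>(*MemToShadow(addr));
  if (s == 0) return false;  // the whole 8-byte granule is addressable
  // Shadow value s > 0 means only the first s bytes of the granule are
  // addressable; compare against the offset of the last byte accessed.
  int8_t last = static_cast<int8_t>((addr & (kGranularity - 1)) + size - 1);
  return last >= s;
}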
// Force the linker to keep the symbols for various ASan interface functions.
// We want to keep those in the executable in order to let the instrumented
// dynamic libraries access the symbol even if it is not used by the executable
@ -372,7 +533,8 @@ static void PrintAddressSpaceLayout() {
(void*)MEM_TO_SHADOW(kMidShadowEnd));
}
Printf("\n");
Printf("red_zone=%zu\n", (uptr)flags()->redzone);
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@ -387,6 +549,171 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
CHECK(!asan_init_is_running && "ASan init calls itself!");
asan_init_is_running = true;
// Initialize flags. This must be done early, because most of the
// initialization steps look at flags().
const char *options = GetEnv("ASAN_OPTIONS");
InitializeFlags(flags(), options);
InitializeHighMemEnd();
// Make sure we are not statically linked.
AsanDoesNotSupportStaticLinkage();
// Install tool-specific callbacks in sanitizer_common.
SetDieCallback(AsanDie);
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
if (!flags()->start_deactivated)
ParseExtraActivationFlags();
__sanitizer_set_report_path(common_flags()->log_path);
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
if (options) {
VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
}
if (flags()->start_deactivated)
AsanStartDeactivated();
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
// Setup internal allocator callback.
SetLowLevelAllocateCallback(OnLowLevelAllocate);
InitializeAsanInterceptors();
ReplaceSystemMalloc();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
if (common_flags()->verbosity)
PrintAddressSpaceLayout();
if (flags()->disable_core) {
DisableCoreDumper();
}
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd);
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
// Allocator should be initialized before starting external symbolizer, as
// fork() on Mac locks the allocator.
InitializeAllocator();
Symbolizer::Init(common_flags()->external_symbolizer_path);
// On Linux, AsanThread::ThreadStart() calls malloc(), which is why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;
asan_init_is_running = false;
if (flags()->atexit)
Atexit(asan_atexit);
if (common_flags()->coverage) {
__sanitizer_cov_init();
Atexit(__sanitizer_cov_dump);
}
// interceptors
InitTlsSize();
// Create main thread.
AsanThread *main_thread = AsanThread::Create(0, 0);
CreateThreadContextArgs create_main_args = { main_thread, 0 };
u32 main_tid = asanThreadRegistry().CreateThread(
0, true, 0, &create_main_args);
CHECK_EQ(0, main_tid);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#endif // CAN_SANITIZE_LEAKS
VReport(1, "AddressSanitizer Init done\n");
}
// Initialize as requested from some part of ASan runtime library (interceptors,
// allocator, etc).
void AsanInitFromRtl() {
AsanInitInternal();
}
#if ASAN_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
// (and thus the normal initializer from .preinit_array hasn't run).
class AsanInitializer {
public: // NOLINT
AsanInitializer() {
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
if (UNLIKELY(!asan_inited))
__asan_init();
}
};
static AsanInitializer asan_initializer;
#endif // ASAN_DYNAMIC
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@ -435,139 +762,10 @@ void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
death_callback = callback;
}
// Initialize as requested from instrumented application code.
// We use this call as a trigger to wake up ASan from deactivated state.
void __asan_init() {
if (asan_inited) return;
SanitizerToolName = "AddressSanitizer";
CHECK(!asan_init_is_running && "ASan init calls itself!");
asan_init_is_running = true;
InitializeHighMemEnd();
// Make sure we are not statically linked.
AsanDoesNotSupportStaticLinkage();
// Install tool-specific callbacks in sanitizer_common.
SetDieCallback(AsanDie);
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
// Initialize flags. This must be done early, because most of the
// initialization steps look at flags().
const char *options = GetEnv("ASAN_OPTIONS");
InitializeFlags(flags(), options);
__sanitizer_set_report_path(common_flags()->log_path);
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
if (common_flags()->verbosity && options) {
Report("Parsed ASAN_OPTIONS: %s\n", options);
}
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
// Setup internal allocator callback.
SetLowLevelAllocateCallback(OnLowLevelAllocate);
InitializeAsanInterceptors();
ReplaceSystemMalloc();
ReplaceOperatorsNewAndDelete();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
if (common_flags()->verbosity)
PrintAddressSpaceLayout();
if (flags()->disable_core) {
DisableCoreDumper();
}
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd);
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
AsanTSDInit(PlatformTSDDtor);
InstallSignalHandlers();
// Allocator should be initialized before starting external symbolizer, as
// fork() on Mac locks the allocator.
InitializeAllocator();
// Start symbolizer process if necessary.
if (common_flags()->symbolize) {
Symbolizer::Init(common_flags()->external_symbolizer_path);
} else {
Symbolizer::Disable();
}
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;
asan_init_is_running = false;
if (flags()->atexit)
Atexit(asan_atexit);
if (flags()->coverage)
Atexit(__sanitizer_cov_dump);
// interceptors
InitTlsSize();
// Create main thread.
AsanThread *main_thread = AsanThread::Create(0, 0);
CreateThreadContextArgs create_main_args = { main_thread, 0 };
u32 main_tid = asanThreadRegistry().CreateThread(
0, true, 0, &create_main_args);
CHECK_EQ(0, main_tid);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
force_interface_symbols(); // no-op.
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#endif // CAN_SANITIZE_LEAKS
if (common_flags()->verbosity) {
Report("AddressSanitizer Init done\n");
}
AsanCheckIncompatibleRT();
AsanActivate();
AsanInitInternal();
}

@ -10,40 +10,10 @@
// Code for ASan stack trace.
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_flags.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
int out_size) {
return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
: false;
}
void PrintStack(const uptr *trace, uptr size) {
StackTrace::PrintStack(trace, size, MaybeCallAsanSymbolize);
}
void PrintStack(StackTrace *stack) {
PrintStack(stack->trace, stack->size);
}
} // namespace __asan
// ------------------ Interface -------------- {{{1
// Provide default implementation of __asan_symbolize that does nothing
// and may be overriden by user if he wants to use his own symbolization.
// ASan on Windows has its own implementation of this.
#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
return false;
}
#endif
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {

@ -19,49 +19,62 @@
namespace __asan {
void PrintStack(StackTrace *stack);
void PrintStack(const uptr *trace, uptr size);
} // namespace __asan
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE
void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc,
uptr bp, void *context, bool fast) {
#if SANITIZER_WINDOWS
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
stack.Unwind(max_s, pc, bp, 0, 0, fast)
stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
{ \
AsanThread *t; \
stack.size = 0; \
if (asan_inited) { \
if ((t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_top = t->stack_top(); \
uptr stack_bottom = t->stack_bottom(); \
ScopedUnwinding unwind_scope(t); \
stack.Unwind(max_s, pc, bp, stack_top, stack_bottom, fast); \
} else if (t == 0 && !fast) { \
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */ \
stack.Unwind(max_s, pc, bp, 0, 0, false); \
} \
} \
AsanThread *t;
stack->size = 0;
if (LIKELY(asan_inited)) {
if ((t = GetCurrentThread()) && !t->isUnwinding()) {
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
} else if (t == 0 && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
}
}
#endif // SANITIZER_WINDOWS
}
} // namespace __asan
// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals.
#define GET_STACK_TRACE(max_size, fast) \
GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
#define GET_STACK_TRACE(max_size, fast) \
StackTrace stack; \
if (max_size <= 2) { \
stack.size = max_size; \
if (max_size > 0) { \
stack.top_frame_bp = GET_CURRENT_FRAME(); \
stack.trace[0] = StackTrace::GetCurrentPc(); \
if (max_size > 1) \
stack.trace[1] = GET_CALLER_PC(); \
} \
} else { \
GetStackTraceWithPcBpAndContext(&stack, max_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), 0, fast); \
}
#define GET_STACK_TRACE_FATAL(pc, bp) \
GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL(pc, bp) \
StackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
StackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@ -78,7 +91,7 @@ void PrintStack(const uptr *trace, uptr size);
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE_FATAL_HERE; \
PrintStack(&stack); \
stack.Print(); \
}
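
To make the small-depth fast path above concrete: for malloc_context_size <= 2 the macro skips the unwinder entirely and records the PCs directly. GET_STACK_TRACE(2, fast) hand-expands (using the same internal macros) to:

// Hand expansion of GET_STACK_TRACE(2, fast) from the macro above.
StackTrace stack;
stack.size = 2;
stack.top_frame_bp = GET_CURRENT_FRAME();
stack.trace[0] = StackTrace::GetCurrentPc();
stack.trace[1] = GET_CALLER_PC();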
#endif // ASAN_STACK_H

@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {
@ -76,7 +77,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void *arg) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
@ -85,28 +86,27 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine,
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
if (common_flags()->verbosity >= 1)
Report("T%d TSDDtor\n", context->tid);
VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
if (common_flags()->verbosity >= 1) {
Report("T%d exited\n", tid());
}
int tid = this->tid();
VReport(1, "T%d exited\n", tid);
malloc_storage().CommitBack();
if (flags()->use_sigaltstack) UnsetAlternateSignalStack();
asanThreadRegistry().FinishThread(tid());
if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
asanThreadRegistry().FinishThread(tid);
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStackAndTLS();
DeleteFakeStack();
DeleteFakeStack(tid);
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
DTLS_Destroy();
}
// We want to create the FakeStack lazily on the first use, but not earlier
@ -121,13 +121,16 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
// 1 -- being initialized
// ptr -- initialized
// This CAS checks if the state was 0 and if so changes it to state 1,
// if that was successfull, it initilizes the pointer.
// if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
if (flags()->uar_stack_size_log)
stack_size_log = static_cast<uptr>(flags()->uar_stack_size_log);
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
stack_size_log =
Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
stack_size_log =
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
@ -140,12 +143,10 @@ void AsanThread::Init() {
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
if (common_flags()->verbosity >= 1) {
int local = 0;
Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
tid(), (void*)stack_bottom_, (void*)stack_top_,
stack_top_ - stack_bottom_, &local);
}
int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local);
fake_stack_ = 0; // Will be initialized lazily if needed.
AsanPlatformThreadInit();
}
@ -153,7 +154,7 @@ void AsanThread::Init() {
thread_return_t AsanThread::ThreadStart(uptr os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
if (flags()->use_sigaltstack) SetAlternateSignalStack();
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
@ -265,10 +266,8 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
if (common_flags()->verbosity >= 2) {
Report("SetCurrentThread: %p for thread %p\n",
t->context(), (void*)GetThreadSelf());
}
VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
(void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());

@ -15,7 +15,6 @@
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
@ -76,12 +75,12 @@ class AsanThread {
return addr >= stack_bottom_ && addr < stack_top_;
}
void DeleteFakeStack() {
void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
t->Destroy();
t->Destroy(tid);
}
bool has_fake_stack() {

@ -33,11 +33,6 @@ extern "C" {
namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;
@ -73,17 +68,9 @@ void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
void SetAlternateSignalStack() {
// FIXME: Decide what to do on Windows.
}
void AsanCheckDynamicRTPrereqs() { UNIMPLEMENTED(); }
void UnsetAlternateSignalStack() {
// FIXME: Decide what to do on Windows.
}
void InstallSignalHandlers() {
// FIXME: Decide what to do on Windows.
}
void AsanCheckIncompatibleRT() {}
void AsanPlatformThreadInit() {
// Nothing here for now.
@ -93,54 +80,10 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
void AsanOnSIGSEGV(int, void *siginfo, void *context) {
UNIMPLEMENTED();
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *addr, char *out_buffer, int buffer_size) {
BlockingMutexLock lock(&dbghelp_lock);
if (!dbghelp_initialized) {
SymSetOptions(SYMOPT_DEFERRED_LOADS |
SYMOPT_UNDNAME |
SYMOPT_LOAD_LINES);
CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
// FIXME: We don't call SymCleanup() on exit yet - should we?
dbghelp_initialized = true;
}
// See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol->MaxNameLen = MAX_SYM_NAME;
DWORD64 offset = 0;
BOOL got_objname = SymFromAddr(GetCurrentProcess(),
(DWORD64)addr, &offset, symbol);
if (!got_objname)
return false;
DWORD unused;
IMAGEHLP_LINE64 info;
info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(),
(DWORD64)addr, &unused, &info);
int written = 0;
out_buffer[0] = '\0';
// FIXME: it might be useful to print out 'obj' or 'obj+offset' info too.
if (got_fileline) {
written += internal_snprintf(out_buffer + written, buffer_size - written,
" %s %s:%d", symbol->Name,
info.FileName, info.LineNumber);
} else {
written += internal_snprintf(out_buffer + written, buffer_size - written,
" %s+0x%p", symbol->Name, offset);
}
return true;
}
} // extern "C"
#endif // _WIN32

@ -48,9 +48,10 @@ extern "C" {
((void)(addr), (void)(size))
#endif
// Returns true iff addr is poisoned (i.e. 1-byte read/write access to this
// Returns 1 if addr is poisoned (i.e. 1-byte read/write access to this
// address will result in error report from AddressSanitizer).
bool __asan_address_is_poisoned(void const volatile *addr);
// Otherwise returns 0.
int __asan_address_is_poisoned(void const volatile *addr);
// If at least one byte in [beg, beg+size) is poisoned, return the address
// of the first such byte. Otherwise return 0.
@ -63,7 +64,7 @@ extern "C" {
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
void __asan_report_error(void *pc, void *bp, void *sp,
void *addr, bool is_write, size_t access_size);
void *addr, int is_write, size_t access_size);
// Sets the exit code to use when reporting an error.
// Returns the old value.
@ -80,22 +81,14 @@ extern "C" {
// the program crashes before ASan report is printed.
void __asan_on_error();
// User may provide its own implementation for symbolization function.
// It should print the description of instruction at address "pc" to
// "out_buffer". Description should be at most "out_size" bytes long.
// User-specified function should return true if symbolization was
// successful.
bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size);
// Returns the estimated number of bytes that will be reserved by allocator
// for request of "size" bytes. If ASan allocator can't allocate that much
// memory, returns the maximal possible allocation size, otherwise returns
// "size".
size_t __asan_get_estimated_allocated_size(size_t size);
// Returns true if p was returned by the ASan allocator and
// is not yet freed.
bool __asan_get_ownership(const void *p);
// Returns 1 if p was returned by the ASan allocator and is not yet freed.
// Otherwise returns 0.
int __asan_get_ownership(const void *p);
// Returns the number of bytes reserved for the pointer p.
// Requires (get_ownership(p) == true) or (p == 0).
size_t __asan_get_allocated_size(const void *p);
@ -128,6 +121,24 @@ extern "C" {
// deallocation of "ptr".
void __asan_malloc_hook(void *ptr, size_t size);
void __asan_free_hook(void *ptr);
// The following 2 functions facilitate garbage collection in presence of
// asan's fake stack.
// Returns an opaque handler to be used later in __asan_addr_is_in_fake_stack.
// Returns NULL if the current thread does not have a fake stack.
void *__asan_get_current_fake_stack();
// If fake_stack is non-NULL and addr belongs to a fake frame in
// fake_stack, returns the address on real stack that corresponds to
// the fake frame and sets beg/end to the boundaries of this fake frame.
// Otherwise returns NULL and does not touch beg/end.
// If beg/end are NULL, they are not touched.
// This function may be called from a thread other than the owner of
// fake_stack, but the owner thread needs to be alive.
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end);
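
A sketch of how a conservative garbage collector might use the two functions declared above to map a scanned pointer that lands in ASan's fake stack back to the real stack (the function name is ours, not part of the API):

#include <sanitizer/asan_interface.h>

void *ResolveMaybeFakeFrame(void *addr) {
  void *fake_stack = __asan_get_current_fake_stack();
  if (!fake_stack) return addr;  // current thread has no fake stack
  void *beg, *end;
  void *real = __asan_addr_is_in_fake_stack(fake_stack, addr, &beg, &end);
  return real ? real : addr;     // NULL means addr is not in a fake frame
}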
#ifdef __cplusplus
} // extern "C"
#endif

@ -22,13 +22,28 @@
#ifdef __cplusplus
extern "C" {
#endif
// Arguments for __sanitizer_sandbox_on_notify() below.
typedef struct {
// Enable sandbox support in sanitizer coverage.
int coverage_sandboxed;
// File descriptor to write coverage data to. If -1 is passed, a file will
// be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
// effect if coverage_sandboxed == 0.
intptr_t coverage_fd;
// If non-zero, split the coverage data into well-formed blocks. This is
// useful when coverage_fd is a socket descriptor. Each block will contain
// a header, allowing data from multiple processes to be sent over the same
// socket.
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
void __sanitizer_sandbox_on_notify(void *reserved);
void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
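
A hypothetical pre-sandbox setup using the struct defined above: route coverage data to an already-open socket before file-system access is revoked (function name and block size are illustrative assumptions):

#include <sanitizer/common_interface_defs.h>

void BeforeEnteringSandbox(int coverage_socket_fd) {
  __sanitizer_sandbox_arguments args = {};
  args.coverage_sandboxed = 1;
  args.coverage_fd = coverage_socket_fd;   // -1 would pre-open a file instead
  args.coverage_max_block_size = 1 << 16;  // well-formed blocks over the socket
  __sanitizer_sandbox_on_notify(&args);
}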
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@ -45,6 +60,8 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Initialize coverage.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
@ -54,7 +71,7 @@ extern "C" {
// in a contiguous region of memory. The container owns the region of memory
// [beg, end); the memory [beg, mid) is used to store the current elements
// and the memory [mid, end) is reserved for future elements;
// end <= mid <= end. For example, in "std::vector<> v"
// beg <= mid <= end. For example, in "std::vector<> v"
// beg = &v[0];
// end = beg + v.capacity() * sizeof(v[0]);
// mid = beg + v.size() * sizeof(v[0]);
@ -82,6 +99,14 @@ extern "C" {
const void *end,
const void *old_mid,
const void *new_mid);
// Returns true if the contiguous container [beg, end) is properly poisoned
// (e.g. with __sanitizer_annotate_contiguous_container), i.e. if
// - [beg, mid) is addressable,
// - [mid, end) is unaddressable.
// Full verification requires O(end-beg) time; this function tries to avoid
// such complexity by touching only parts of the container around beg/mid/end.
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
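
An illustration of the beg/mid/end contract above, using a raw buffer that stands in for a container's storage (the function and variable names are ours, not part of the API):

#include <sanitizer/common_interface_defs.h>

void GrowFrom2To3(char *beg, char *end) {  // [beg, end) = full capacity
  char *old_mid = beg + 2;                 // two elements currently live
  char *new_mid = beg + 3;                 // a third element becomes live
  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid);
  // [beg, new_mid) is now addressable; [new_mid, end) stays poisoned.
}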
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();

@ -37,6 +37,9 @@ struct dfsan_label_info {
void *userdata;
};
/// Signature of the callback argument to dfsan_set_write_callback().
typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
@ -72,6 +75,14 @@ int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
/// Returns the number of labels allocated.
size_t dfsan_get_label_count(void);
/// Sets a callback to be invoked on calls to write(). The callback is invoked
/// before the write is done. The write is not guaranteed to succeed when the
/// callback executes. Pass in NULL to remove any callback.
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
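
A sketch of the callback in use: observe which labels reach write() before the data leaves the process. This assumes dfsan_read_label from the same interface header, which returns the union of the labels on the given range:

#include <sanitizer/dfsan_interface.h>
#include <stdio.h>

static void OnLabeledWrite(int fd, const void *buf, size_t count) {
  dfsan_label label = dfsan_read_label(buf, count);  // union of labels in buf
  fprintf(stderr, "write(fd=%d, %zu bytes) carries label %u\n",
          fd, count, (unsigned)label);
}

__attribute__((constructor)) static void InstallWriteHook() {
  dfsan_set_write_callback(OnLabeledWrite);
}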
#ifdef __cplusplus
} // extern "C"

@ -21,13 +21,24 @@ extern "C" {
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
// Memory regions registered through this interface will be treated as sources
// of live pointers during leak checking. Useful if you store pointers in
// mapped memory.
// Points of note:
// - __lsan_unregister_root_region() must be called with the same pointer and
// size that have earlier been passed to __lsan_register_root_region()
// - LSan will skip any inaccessible memory when scanning a root region. E.g.,
// if you map memory within a larger region that you have mprotect'ed, you can
// register the entire large region.
// - the implementation is not optimized for performance. This interface is
// intended to be used for a small number of relatively static regions.
void __lsan_register_root_region(const void *p, size_t size);
void __lsan_unregister_root_region(const void *p, size_t size);
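
A sketch of the root-region interface above: keep pointers stored in a custom mmap'ed arena visible to LSan (arena helpers are illustrative, not part of the API):

#include <sanitizer/lsan_interface.h>
#include <sys/mman.h>

void *CreateArena(size_t size) {
  void *p = mmap(0, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  __lsan_register_root_region(p, size);    // scanned for live pointers
  return p;
}

void DestroyArena(void *p, size_t size) {
  __lsan_unregister_root_region(p, size);  // must match the registration exactly
  munmap(p, size);
}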
// Calling this function makes LSan enter the leak checking phase immediately.
// Use this if normal end-of-process leak checking happens too late (e.g. if
// you have intentional memory leaks in your shutdown code). Calling this
@ -35,6 +46,16 @@ extern "C" {
// most once per process. This function will terminate the process if there
// are memory leaks and the exit_code flag is non-zero.
void __lsan_do_leak_check();
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
// This function may be optionally provided by the user and should return
// a string containing LSan suppressions.
const char *__lsan_default_suppressions();
#ifdef __cplusplus
} // extern "C"

@ -17,13 +17,10 @@
#ifdef __cplusplus
extern "C" {
#endif
#if __has_feature(memory_sanitizer)
/* Returns a string describing a stack origin.
Return NULL if the origin is invalid, or is not a stack origin. */
const char *__msan_get_origin_descr_if_stack(uint32_t id);
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
@ -39,6 +36,10 @@ extern "C" {
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
/* Make a null-terminated string fully initialized (without changing its
contents). */
void __msan_unpoison_string(const volatile char *a);
/* Make memory region fully uninitialized (without changing its contents). */
void __msan_poison(const volatile void *a, size_t size);
@ -51,6 +52,10 @@ extern "C" {
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
/* Checks that memory range is fully initialized, and reports an error if it
* is not. */
void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
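
A sketch combining the new check with __msan_test_shadow above: validate that a buffer filled by uninstrumented code (e.g. hand-written assembly) is fully initialized before it is consumed further (the wrapper function is ours):

/* Report a UMR at the first uninitialized byte of [buf, buf+n), if any. */
#include <sanitizer/msan_interface.h>

void AssertFullyInitialized(const void *buf, size_t n) {
  /* __msan_test_shadow returns -1 when the whole range is initialized,
     otherwise the offset of the first uninitialized byte. */
  if (__msan_test_shadow(buf, n) != -1)
    __msan_check_mem_is_initialized(buf, n);  /* prints the report */
}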
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
@ -67,13 +72,13 @@ extern "C" {
modules that were compiled without the corresponding compiler flag. */
void __msan_set_keep_going(int keep_going);
/* Print shadow and origin for the memory range to stdout in a human-readable
/* Print shadow and origin for the memory range to stderr in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
/* Print current function arguments shadow and origin to stdout in a
/* Print shadow for the memory range to stderr in a minimalistic
human-readable format. */
void __msan_print_param_shadow();
void __msan_dump_shadow(const volatile void *x, size_t size);
/* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component();
@ -86,6 +91,9 @@ extern "C" {
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __msan_set_death_callback(void (*callback)(void));
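
A small sketch of installing (and unsetting) such a callback; the cleanup action is hypothetical:

#include <stdio.h>
#include <sanitizer/msan_interface.h>

static void FlushOnDeath() { fflush(NULL); }  // hypothetical last-gasp cleanup

void InstallDeathCallback() {
  __msan_set_death_callback(FlushOnDeath);  // runs right before MSan dies
  // __msan_set_death_callback(0);          // would remove it again
}
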
/***********************************/
/* Allocator statistics interface. */
@ -132,27 +140,6 @@ extern "C" {
deallocation of "ptr". */
void __msan_malloc_hook(const volatile void *ptr, size_t size);
void __msan_free_hook(const volatile void *ptr);
#else // __has_feature(memory_sanitizer)
#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
#define __msan_set_origin(a, size, origin)
#define __msan_get_origin(a) ((uint32_t)-1)
#define __msan_get_track_origins() (0)
#define __msan_get_umr_origin() ((uint32_t)-1)
#define __msan_unpoison(a, size)
#define __msan_poison(a, size)
#define __msan_partial_poison(data, shadow, size)
#define __msan_test_shadow(x, size) ((intptr_t)-1)
#define __msan_set_exit_code(exit_code)
#define __msan_set_expect_umr(expect_umr)
#define __msan_print_shadow(x, size)
#define __msan_print_param_shadow()
#define __msan_has_dynamic_component() (0)
#define __msan_allocated_memory(data, size)
#endif // __has_feature(memory_sanitizer)
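
Assuming the hooks are meant to be user-defined (as the comments above suggest), a hedged sketch with hypothetical bookkeeping:

#include <stddef.h>

static size_t g_live_bytes;  // hypothetical allocation counter

extern "C" void __msan_malloc_hook(const volatile void *ptr, size_t size) {
  (void)ptr;
  g_live_bytes += size;  // called right after an allocation
}

extern "C" void __msan_free_hook(const volatile void *ptr) {
  (void)ptr;             // called right before 'ptr' is deallocated
}
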
#ifdef __cplusplus
} // extern "C"
#endif


@ -7,14 +7,11 @@
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Public interface header for TSan atomics.
//===----------------------------------------------------------------------===//
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
#ifndef INTERFACE_ATTRIBUTE
# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
#endif
#ifdef __cplusplus
extern "C" {
#endif
@ -23,14 +20,12 @@ typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
#if defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
# define __TSAN_HAS_INT128 1
#else
typedef char __tsan_atomic128;
#define __TSAN_HAS_INT128 0
# define __TSAN_HAS_INT128 0
#endif
// Part of ABI, do not change.
@ -45,159 +40,181 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
#endif
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
__tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
__tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 v, __tsan_memory_order mo);
#endif
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
#endif
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
__tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order fail_mo);
#endif
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
__tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
#undef INTERFACE_ATTRIBUTE
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
#endif // TSAN_INTERFACE_ATOMIC_H
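
These declarations form the ABI that TSan-instrumented code calls for C/C++ atomics; a hedged sketch of invoking them directly (normally the compiler emits these calls when building with -fsanitize=thread):

#include <sanitizer/tsan_interface_atomic.h>

static __tsan_atomic32 counter;

void BumpCounter() {
  __tsan_atomic32_store(&counter, 1, __tsan_memory_order_release);
  __tsan_atomic32 seen =
      __tsan_atomic32_load(&counter, __tsan_memory_order_acquire);
  __tsan_atomic32 prev =
      __tsan_atomic32_fetch_add(&counter, seen, __tsan_memory_order_acq_rel);
  (void)prev;
}
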


@ -13,7 +13,8 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
#if !defined(__linux__) && !defined(__FreeBSD__) && \
!defined(__APPLE__) && !defined(_WIN32)
# error "Interception doesn't work on this operating system."
#endif
@ -233,11 +234,11 @@ typedef unsigned long uptr; // NOLINT
#define INCLUDED_FROM_INTERCEPTION_LIB
#if defined(__linux__)
#if defined(__linux__) || defined(__FreeBSD__)
# include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX(func, symver)
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
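
A hedged sketch of the pattern these macros serve, with a hypothetical strlen interceptor (INTERCEPTOR() and REAL() come from this same header):

#include "interception/interception.h"

INTERCEPTOR(SIZE_T, strlen, const char *s) {
  // ... tool-specific checks on 's' would go here ...
  return REAL(strlen)(s);  // forward to the original function
}

void InitializeInterceptors() {
  INTERCEPT_FUNCTION(strlen);  // resolves REAL(strlen) for this platform
}
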


@ -10,10 +10,10 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#if defined(__linux__) || defined(__FreeBSD__)
#include "interception.h"
#include <dlfcn.h> // for dlsym
#include <dlfcn.h> // for dlsym() and dlvsym()
namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
@ -31,4 +31,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver) {
} // namespace __interception
#endif // __linux__
#endif // __linux__ || __FreeBSD__


@ -10,7 +10,7 @@
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#if defined(__linux__) || defined(__FreeBSD__)
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
@ -26,20 +26,20 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX(func) \
::__interception::GetRealFunctionAddress( \
#func, (::__interception::uptr*)&REAL(func), \
(::__interception::uptr)&(func), \
(::__interception::uptr)&WRAP(func))
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::GetRealFunctionAddress( \
#func, (::__interception::uptr *)&__interception::PTR_TO_REAL(func), \
(::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, symver)
#else
# define INTERCEPT_FUNCTION_VER_LINUX(func, symver) \
INTERCEPT_FUNCTION_LINUX(func)
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
#endif // __linux__
#endif // __linux__ || __FreeBSD__


@ -17,13 +17,13 @@
#include <stddef.h>
#include <stdint.h>
COMPILER_CHECK(sizeof(SIZE_T) == sizeof(size_t));
COMPILER_CHECK(sizeof(SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(INTMAX_T) == sizeof(intmax_t));
COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#ifndef __APPLE__
COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
// The following are the cases when pread (and friends) is used instead of
@ -31,7 +31,7 @@ COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t));
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \
_FILE_OFFSET_BITS != 64
COMPILER_CHECK(sizeof(OFF_T) == sizeof(off_t));
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif


@ -54,91 +54,136 @@ static void WriteJumpInstruction(char *jmp_from, char *to) {
*(ptrdiff_t*)(jmp_from + 1) = offset;
}
bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
#ifdef _WIN64
# error OverrideFunction was not tested on x64
#endif
// Basic idea:
// We write 5 bytes (jmp-to-new_func) at the beginning of the 'old_func'
// to override it. We want to be able to execute the original 'old_func' from
// the wrapper, so we need to keep the leading 5+ bytes ('head') of the
// original instructions somewhere with a "jmp old_func+head".
// We call these 'head'+5 bytes of instructions a "trampoline".
static char *GetMemoryForTrampoline(size_t size) {
// Trampolines are allocated from a common pool.
const int POOL_SIZE = 1024;
static char *pool = NULL;
static size_t pool_used = 0;
if (pool == NULL) {
pool = (char*)VirtualAlloc(NULL, POOL_SIZE,
MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
// FIXME: set PAGE_EXECUTE_READ access after setting all interceptors?
if (pool == NULL)
return false;
if (!pool) {
pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
// FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
// interceptors are in place.
if (!pool)
return NULL;
_memset(pool, 0xCC /* int 3 */, POOL_SIZE);
}
char* old_bytes = (char*)old_func;
char* trampoline = pool + pool_used;
if (pool_used + size > POOL_SIZE)
return NULL;
// Find out the number of bytes of the instructions we need to copy to the
// island and store it in 'head'.
size_t head = 0;
while (head < 5) {
switch (old_bytes[head]) {
char *ret = pool + pool_used;
pool_used += size;
return ret;
}
// Returns 0 on error.
static size_t RoundUpToInstrBoundary(size_t size, char *code) {
size_t cursor = 0;
while (cursor < size) {
switch (code[cursor]) {
case '\x51': // push ecx
case '\x52': // push edx
case '\x53': // push ebx
case '\x54': // push esp
case '\x55': // push ebp
case '\x56': // push esi
case '\x57': // push edi
head++;
case '\x5D': // pop ebp
cursor++;
continue;
case '\x6A': // 6A XX = push XX
cursor += 2;
continue;
case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
cursor += 5;
continue;
}
switch (*(unsigned short*)(old_bytes + head)) { // NOLINT
switch (*(unsigned short*)(code + cursor)) { // NOLINT
case 0xFF8B: // 8B FF = mov edi, edi
case 0xEC8B: // 8B EC = mov ebp, esp
case 0xC033: // 33 C0 = xor eax, eax
head += 2;
cursor += 2;
continue;
case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
case 0xEC83: // 83 EC XX = sub esp, XX
head += 3;
cursor += 3;
continue;
case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
head += 6;
cursor += 6;
continue;
case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp TT, WWZZYYXX
cursor += 7;
continue;
}
switch (0x00FFFFFF & *(unsigned int*)(old_bytes + head)) {
switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
case 0x24448A: // 8A 44 24 XX = mov al, byte ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
head += 4;
cursor += 4;
continue;
}
// Unknown instruction!
return false;
// FIXME: Unknown instruction failures might happen when we add a new
// interceptor or a new compiler version. In either case, they should result
// in visible and readable error messages. However, merely calling abort()
// or __debugbreak() leads to an infinite recursion in CheckFailed.
// Do we have a good way to abort with an error message here?
return 0;
}
if (pool_used + head + 5 > POOL_SIZE)
return false;
return cursor;
}
// Now put the "jump to trampoline" instruction into the original code.
bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
#ifdef _WIN64
#error OverrideFunction is not yet supported on x64
#endif
// Function overriding works basically like this:
// We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
// to override it.
// We might want to be able to execute the original 'old_func' from the
// wrapper, in this case we need to keep the leading 5+ bytes ('head')
// of the original code somewhere with a "jmp <old_func+head>".
// We call these 'head'+5 bytes of instructions a "trampoline".
char *old_bytes = (char *)old_func;
// We'll need at least 5 bytes for a 'jmp'.
size_t head = 5;
if (orig_old_func) {
// Find out the number of bytes of the instructions we need to copy
// to the trampoline and store it in 'head'.
head = RoundUpToInstrBoundary(head, old_bytes);
if (!head)
return false;
// Put the needed instructions into the trampoline bytes.
char *trampoline = GetMemoryForTrampoline(head + 5);
if (!trampoline)
return false;
_memcpy(trampoline, old_bytes, head);
WriteJumpInstruction(trampoline + head, old_bytes + head);
*orig_old_func = (uptr)trampoline;
}
// Now put the "jmp <new_func>" instruction at the original code location.
// We should preserve the EXECUTE flag as some of our own code might be
// located in the same page (sic!). FIXME: might consider putting the
// __interception code into a separate section or something?
DWORD old_prot, unused_prot;
if (!VirtualProtect((void*)old_func, head, PAGE_EXECUTE_READWRITE,
if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
&old_prot))
return false;
// Put the needed instructions into the trampoline bytes.
_memcpy(trampoline, old_bytes, head);
WriteJumpInstruction(trampoline + head, old_bytes + head);
*orig_old_func = (uptr)trampoline;
pool_used += head + 5;
// Intercept the 'old_func'.
WriteJumpInstruction(old_bytes, (char*)new_func);
WriteJumpInstruction(old_bytes, (char *)new_func);
_memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
if (!VirtualProtect((void*)old_func, head, old_prot, &unused_prot))
// Restore the original permissions.
if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
return false; // not clear if this failure bothers us.
return true;
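
A hedged, Windows-only usage sketch; foo() and my_foo() are hypothetical:

static int foo(int x) { return x + 1; }     // hypothetical target
static int my_foo(int x) { return x + 2; }  // hypothetical replacement

void InstallOverride() {
  __interception::uptr real_foo = 0;
  if (__interception::OverrideFunction((__interception::uptr)&foo,
                                       (__interception::uptr)&my_foo,
                                       &real_foo)) {
    // Calls to foo() now land in my_foo(); the saved head plus a jump
    // back (the trampoline) is callable through real_foo.
    typedef int (*foo_f)(int);
    int from_original = ((foo_f)real_foo)(1);  // executes the original foo()
    (void)from_original;
  }
}
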


@ -33,6 +33,11 @@ static void InitializeCommonFlags() {
ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
}
///// Interface to the common LSan module. /////
bool WordIsPoisoned(uptr addr) {
return false;
}
} // namespace __lsan
using namespace __lsan; // NOLINT
@ -53,12 +58,7 @@ extern "C" void __lsan_init() {
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
// Start symbolizer process if necessary.
if (common_flags()->symbolize) {
Symbolizer::Init(common_flags()->external_symbolizer_path);
} else {
Symbolizer::Disable();
}
Symbolizer::Init(common_flags()->external_symbolizer_path);
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)


@ -141,7 +141,11 @@ uptr PointsIntoChunk(void* p) {
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
if (m->allocated && addr < chunk + m->requested_size)
if (!m->allocated)
return 0;
if (addr < chunk + m->requested_size)
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
return chunk;
return 0;
}


@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
@ -24,7 +25,8 @@
#if CAN_SANITIZE_LEAKS
namespace __lsan {
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
@ -39,42 +41,56 @@ static void InitializeFlags() {
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
f->print_suppressions = true;
f->suppressions="";
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
f->use_root_regions = true;
f->use_unaligned = false;
f->verbosity = 0;
f->use_poisoned = false;
f->log_pointers = false;
f->log_threads = false;
const char *options = GetEnv("LSAN_OPTIONS");
if (options) {
ParseFlag(options, &f->use_registers, "use_registers");
ParseFlag(options, &f->use_globals, "use_globals");
ParseFlag(options, &f->use_stacks, "use_stacks");
ParseFlag(options, &f->use_tls, "use_tls");
ParseFlag(options, &f->use_unaligned, "use_unaligned");
ParseFlag(options, &f->report_objects, "report_objects");
ParseFlag(options, &f->resolution, "resolution");
ParseFlag(options, &f->use_registers, "use_registers", "");
ParseFlag(options, &f->use_globals, "use_globals", "");
ParseFlag(options, &f->use_stacks, "use_stacks", "");
ParseFlag(options, &f->use_tls, "use_tls", "");
ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
ParseFlag(options, &f->report_objects, "report_objects", "");
ParseFlag(options, &f->resolution, "resolution", "");
CHECK_GE(&f->resolution, 0);
ParseFlag(options, &f->max_leaks, "max_leaks");
ParseFlag(options, &f->max_leaks, "max_leaks", "");
CHECK_GE(&f->max_leaks, 0);
ParseFlag(options, &f->verbosity, "verbosity");
ParseFlag(options, &f->log_pointers, "log_pointers");
ParseFlag(options, &f->log_threads, "log_threads");
ParseFlag(options, &f->exitcode, "exitcode");
ParseFlag(options, &f->suppressions, "suppressions");
ParseFlag(options, &f->log_pointers, "log_pointers", "");
ParseFlag(options, &f->log_threads, "log_threads", "");
ParseFlag(options, &f->exitcode, "exitcode", "");
ParseFlag(options, &f->print_suppressions, "print_suppressions", "");
ParseFlag(options, &f->suppressions, "suppressions", "");
}
}
#define LOG_POINTERS(...) \
do { \
if (flags()->log_pointers) Report(__VA_ARGS__); \
} while (0);
#define LOG_THREADS(...) \
do { \
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0);
SuppressionContext *suppression_ctx;
void InitializeSuppressions() {
CHECK(!suppression_ctx);
ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
suppression_ctx = new(placeholder_) SuppressionContext;
ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
suppression_ctx = new(placeholder) SuppressionContext;
char *suppressions_from_file;
uptr buffer_size;
if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
@ -89,8 +105,22 @@ void InitializeSuppressions() {
suppression_ctx->Parse(__lsan_default_suppressions());
}
struct RootRegion {
const void *begin;
uptr size;
};
InternalMmapVector<RootRegion> *root_regions;
void InitializeRootRegions() {
CHECK(!root_regions);
ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
void InitCommonLsan() {
InitializeFlags();
InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
@ -130,8 +160,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
const uptr alignment = flags()->pointer_alignment();
if (flags()->log_pointers)
Report("Scanning %s range %p-%p.\n", region_type, begin, end);
LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
@ -146,10 +175,19 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue;
if (m.tag() == kIgnored && tag != kReachable) continue;
// Do this check relatively late so we can log only the interesting cases.
if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
LOG_POINTERS(
"%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
"%zu.\n",
pp, p, chunk, chunk + m.requested_size(), m.requested_size());
continue;
}
m.set_tag(tag);
if (flags()->log_pointers)
Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
chunk, chunk + m.requested_size(), m.requested_size());
LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
@ -168,7 +206,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr registers_end = registers_begin + registers.size();
for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
@ -176,8 +214,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
if (flags()->log_threads)
Report("Thread %d not found in registry.\n", os_id);
LOG_THREADS("Thread %d not found in registry.\n", os_id);
continue;
}
uptr sp;
@ -194,14 +231,12 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
"REGISTERS", kReachable);
if (flags()->use_stacks) {
if (flags()->log_threads)
Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
if (flags()->log_threads)
Report("WARNING: stack pointer not in stack range.\n");
LOG_THREADS("WARNING: stack pointer not in stack range.\n");
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@ -212,7 +247,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
if (cache_begin == cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
@ -230,6 +265,37 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
}
static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
uptr root_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr begin, end, prot;
while (proc_maps.Next(&begin, &end,
/*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
&prot)) {
uptr intersection_begin = Max(root_begin, begin);
uptr intersection_end = Min(end, root_end);
if (intersection_begin >= intersection_end) continue;
bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
root_begin, root_end, begin, end,
is_readable ? "readable" : "unreadable");
if (is_readable)
ScanRangeForPointers(intersection_begin, intersection_end, frontier,
"ROOT", kReachable);
}
}
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
if (!flags()->use_root_regions) return;
CHECK(root_regions);
for (uptr i = 0; i < root_regions->size(); i++) {
RootRegion region = (*root_regions)[i];
uptr begin_addr = reinterpret_cast<uptr>(region.begin);
ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
}
}
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
while (frontier->size()) {
uptr next_chunk = frontier->back();
@ -264,30 +330,27 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
Frontier frontier(GetPageSizeCached());
Frontier frontier(1);
if (flags()->use_globals)
ProcessGlobalRegions(&frontier);
ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
if (flags()->log_pointers)
Report("Processing platform-specific allocations.\n");
LOG_POINTERS("Processing platform-specific allocations.\n");
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
if (flags()->log_pointers)
Report("Scanning ignored chunks.\n");
LOG_POINTERS("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
if (flags()->log_pointers)
Report("Scanning leaked chunks.\n");
LOG_POINTERS("Scanning leaked chunks.\n");
ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
@ -298,7 +361,8 @@ static void PrintStackTraceById(u32 stack_trace_id) {
StackTrace::PrintStack(trace, size);
}
// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
@ -307,26 +371,17 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
u32 stack_trace_id = 0;
if (resolution > 0) {
uptr size = 0;
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
size = Min(size, resolution);
leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
stack_trace_id = StackDepotPut(trace, size);
} else {
leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
stack_trace_id = m.stack_trace_id();
}
}
}
// ForEachChunkCallback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
Printf("%s leaked %zu byte object at %p.\n",
m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
m.requested_size(), chunk);
leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
m.tag());
}
}
@ -345,12 +400,6 @@ static void PrintMatchedSuppressions() {
Printf("%s\n\n", line);
}
static void PrintLeaked() {
Printf("\n");
Printf("Reporting individual objects:\n");
ForEachChunk(PrintLeakedCb, 0 /* arg */);
}
struct DoLeakCheckParam {
bool success;
LeakReport leak_report;
@ -361,11 +410,8 @@ static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
CHECK(param);
CHECK(!param->success);
CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
if (!param->leak_report.IsEmpty() && flags()->report_objects)
PrintLeaked();
param->success = true;
}
@ -376,7 +422,7 @@ void DoLeakCheck() {
if (already_done) return;
already_done = true;
if (&__lsan_is_turned_off && __lsan_is_turned_off())
return;
return;
DoLeakCheckParam param;
param.success = false;
@ -390,8 +436,9 @@ void DoLeakCheck() {
Report("LeakSanitizer has encountered a fatal error.\n");
Die();
}
uptr have_unsuppressed = param.leak_report.ApplySuppressions();
if (have_unsuppressed) {
param.leak_report.ApplySuppressions();
uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
if (unsuppressed_count > 0) {
Decorator d;
Printf("\n"
"================================================================="
@ -399,27 +446,37 @@ void DoLeakCheck() {
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.End());
param.leak_report.PrintLargest(flags()->max_leaks);
param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
if (have_unsuppressed || (flags()->verbosity >= 1)) {
if (flags()->print_suppressions)
PrintMatchedSuppressions();
if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
if (flags()->exitcode)
internal__exit(flags()->exitcode);
}
if (have_unsuppressed && flags()->exitcode)
internal__exit(flags()->exitcode);
}
static Suppression *GetSuppressionForAddr(uptr addr) {
Suppression *s;
// Suppress by module name.
const char *module_name;
uptr module_offset;
if (Symbolizer::Get()->GetModuleNameAndOffsetForPC(addr, &module_name,
&module_offset) &&
suppression_ctx->Match(module_name, SuppressionLeak, &s))
return s;
// Suppress by file or function name.
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
uptr addr_frames_num = Symbolizer::Get()->SymbolizeCode(
uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
Suppression* s;
if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s))
return s;
}
return 0;
@ -439,26 +496,35 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
bool is_directly_leaked = (tag == kDirectlyLeaked);
for (uptr i = 0; i < leaks_.size(); i++)
uptr i;
for (i = 0; i < leaks_.size(); i++) {
if (leaks_[i].stack_trace_id == stack_trace_id &&
leaks_[i].is_directly_leaked == is_directly_leaked) {
leaks_[i].hit_count++;
leaks_[i].total_size += leaked_size;
return;
break;
}
if (leaks_.size() == kMaxLeaksConsidered) return;
Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
is_directly_leaked, /* is_suppressed */ false };
leaks_.push_back(leak);
}
if (i == leaks_.size()) {
if (leaks_.size() == kMaxLeaksConsidered) return;
Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
is_directly_leaked, /* is_suppressed */ false };
leaks_.push_back(leak);
}
if (flags()->report_objects) {
LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
leaked_objects_.push_back(obj);
}
}
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
@ -468,7 +534,7 @@ static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
return leak1.is_directly_leaked;
}
void LeakReport::PrintLargest(uptr num_leaks_to_print) {
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
@ -476,31 +542,49 @@ void LeakReport::PrintLargest(uptr num_leaks_to_print) {
"reported.\n",
kMaxLeaksConsidered);
uptr unsuppressed_count = 0;
for (uptr i = 0; i < leaks_.size(); i++)
if (!leaks_[i].is_suppressed) unsuppressed_count++;
if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
Printf("The %zu largest leak(s):\n", num_leaks_to_print);
uptr unsuppressed_count = UnsuppressedLeakCount();
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
Printf("The %zu top leak(s):\n", num_leaks_to_report);
InternalSort(&leaks_, leaks_.size(), LeakComparator);
uptr leaks_printed = 0;
Decorator d;
uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
Printf("%s", d.Leak());
Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
leaks_[i].total_size, leaks_[i].hit_count);
Printf("%s", d.End());
PrintStackTraceById(leaks_[i].stack_trace_id);
leaks_printed++;
if (leaks_printed == num_leaks_to_print) break;
PrintReportForLeak(i);
leaks_reported++;
if (leaks_reported == num_leaks_to_report) break;
}
if (leaks_printed < unsuppressed_count) {
uptr remaining = unsuppressed_count - leaks_printed;
if (leaks_reported < unsuppressed_count) {
uptr remaining = unsuppressed_count - leaks_reported;
Printf("Omitting %zu more leak(s).\n", remaining);
}
}
void LeakReport::PrintReportForLeak(uptr index) {
Decorator d;
Printf("%s", d.Leak());
Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
leaks_[index].total_size, leaks_[index].hit_count);
Printf("%s", d.End());
PrintStackTraceById(leaks_[index].stack_trace_id);
if (flags()->report_objects) {
Printf("Objects leaked above:\n");
PrintLeakedObjectsForLeak(index);
Printf("\n");
}
}
void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
u32 leak_id = leaks_[index].id;
for (uptr j = 0; j < leaked_objects_.size(); j++) {
if (leaked_objects_[j].leak_id == leak_id)
Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
leaked_objects_[j].size);
}
}
void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
@ -516,20 +600,24 @@ void LeakReport::PrintSummary() {
ReportErrorSummary(summary.data());
}
uptr LeakReport::ApplySuppressions() {
uptr unsuppressed_count = 0;
void LeakReport::ApplySuppressions() {
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
s->hit_count += leaks_[i].hit_count;
leaks_[i].is_suppressed = true;
} else {
unsuppressed_count++;
}
}
return unsuppressed_count;
}
uptr LeakReport::UnsuppressedLeakCount() {
uptr result = 0;
for (uptr i = 0; i < leaks_.size(); i++)
if (!leaks_[i].is_suppressed) result++;
return result;
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
@ -545,13 +633,51 @@ void __lsan_ignore_object(const void *p) {
// locked.
BlockingMutexLock l(&global_mutex);
IgnoreObjectResult res = IgnoreObjectLocked(p);
if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
Report("__lsan_ignore_object(): no heap object found at %p", p);
if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
Report("__lsan_ignore_object(): "
if (res == kIgnoreObjectInvalid)
VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
if (res == kIgnoreObjectAlreadyIgnored)
VReport(1, "__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
if (res == kIgnoreObjectSuccess)
VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
BlockingMutexLock l(&global_mutex);
CHECK(root_regions);
RootRegion region = {begin, size};
root_regions->push_back(region);
VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
BlockingMutexLock l(&global_mutex);
CHECK(root_regions);
bool removed = false;
for (uptr i = 0; i < root_regions->size(); i++) {
RootRegion region = (*root_regions)[i];
if (region.begin == begin && region.size == size) {
removed = true;
uptr last_index = root_regions->size() - 1;
(*root_regions)[i] = (*root_regions)[last_index];
root_regions->pop_back();
VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
break;
}
}
if (!removed) {
Report(
"__lsan_unregister_root_region(): region at %p of size %llu has not "
"been registered.\n",
begin, size);
Die();
}
#endif // CAN_SANITIZE_LEAKS
}


@ -19,7 +19,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && defined(__x86_64__)
#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@ -49,6 +49,8 @@ struct Flags {
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
// Print matched suppressions after leak checking.
bool print_suppressions;
// Suppressions file name.
const char* suppressions;
@ -61,12 +63,13 @@ struct Flags {
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
// Regions added via __lsan_register_root_region().
bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
// User-visible verbosity.
int verbosity;
// Consider pointers found in poisoned memory to be valid.
bool use_poisoned;
// Debug logging.
bool log_pointers;
@ -77,6 +80,7 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
u32 id;
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
@ -84,17 +88,31 @@ struct Leak {
bool is_suppressed;
};
struct LeakedObject {
u32 leak_id;
uptr addr;
uptr size;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
LeakReport() : leaks_(1) {}
void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
void PrintLargest(uptr max_leaks);
LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {}
void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
bool IsEmpty() { return leaks_.size() == 0; }
uptr ApplySuppressions();
void ApplySuppressions();
uptr UnsuppressedLeakCount();
private:
void PrintReportForLeak(uptr index);
void PrintLeakedObjectsForLeak(uptr index);
u32 next_id_;
InternalMmapVector<Leak> leaks_;
InternalMmapVector<LeakedObject> leaked_objects_;
};
typedef InternalMmapVector<uptr> Frontier;
@ -119,6 +137,15 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate one word for the array size (0) and return a pointer
// to the end of the allocated chunk.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
*reinterpret_cast<uptr *>(chunk_beg) == 0;
}
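
To make the layout concrete, a hypothetical instance of what this matches:

struct T { ~T(); };  // non-trivial destructor forces an array cookie
// T *p = new T[0];
// chunk_beg:  [ element count == 0 (one uptr) ]
// p == chunk_beg + sizeof(uptr), i.e. exactly the end of the chunk,
// so a stored 'p' must still keep the chunk alive.
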
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
@ -127,6 +154,8 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();


@ -17,6 +17,7 @@
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@ -41,11 +42,11 @@ void InitializePlatformSpecificModules() {
return;
}
if (num_matches == 0)
Report("LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
VReport(1, "LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
else if (num_matches > 1)
Report("LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
@ -81,6 +82,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
@ -127,6 +129,21 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;


@ -10,11 +10,11 @@
//
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
@ -32,19 +32,19 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
#define GET_STACK_TRACE \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
bool fast = common_flags()->fast_unwind_on_malloc; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
#define GET_STACK_TRACE \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
bool fast = common_flags()->fast_unwind_on_malloc; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), 0, \
stack_top, stack_bottom, fast); \
}
#define ENSURE_LSAN_INITED do { \
@ -150,7 +150,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
INTERCEPTOR(void, cfree, void *p) ALIAS("free");
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
@ -171,9 +171,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
@ -183,7 +183,8 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
ALIAS(WRAPPER_NAME(memalign));
///// Thread initialization and finalization. /////
@ -236,7 +237,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSizeLinux(attr);
AdjustStackSize(attr);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;


@ -22,6 +22,8 @@ sanitizer_common_files = \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
@ -30,20 +32,23 @@ sanitizer_common_files = \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
sanitizer_procmaps_linux.cc \
sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)


@ -65,19 +65,23 @@ LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_coverage.lo \
sanitizer_flags.lo sanitizer_libc.lo sanitizer_libignore.lo \
sanitizer_linux.lo sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_deadlock_detector1.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix_libcdep.lo \
sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
sanitizer_stacktrace.lo sanitizer_stacktrace_libcdep.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_stacktrace_libcdep.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_symbolizer.lo \
sanitizer_suppressions.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo sanitizer_thread_registry.lo \
sanitizer_win.lo
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_tls_get_addr.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
@ -253,6 +257,8 @@ sanitizer_common_files = \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_coverage.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_libignore.cc \
@ -261,20 +267,23 @@ sanitizer_common_files = \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
sanitizer_procmaps_linux.cc \
sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@ -374,6 +383,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@ -385,6 +396,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
@ -396,6 +409,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:


@ -0,0 +1,340 @@
//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H
#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"
namespace __sanitizer {
// Concurrent uptr->T hashmap.
// T must be a POD type; kSize is preferably a prime, but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
// Map::Handle h(&m, addr);
// use h.operator->() to access the data
// if h.created() then the element was just created, and the current thread
// has exclusive access to it
// otherwise the current thread has only read access to the data
// }
// {
// Map::Handle h(&m, addr, true);
// this will remove the data from the map in Handle dtor
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
struct Cell {
atomic_uintptr_t addr;
T val;
};
struct AddBucket {
uptr cap;
uptr size;
Cell cells[1]; // variable len
};
static const uptr kBucketSize = 3;
struct Bucket {
RWMutex mtx;
atomic_uintptr_t add;
Cell cells[kBucketSize];
};
public:
AddrHashMap();
class Handle {
public:
Handle(AddrHashMap<T, kSize> *map, uptr addr);
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
~Handle();
T *operator->();
bool created() const;
bool exists() const;
private:
friend AddrHashMap<T, kSize>;
AddrHashMap<T, kSize> *map_;
Bucket *bucket_;
Cell *cell_;
uptr addr_;
uptr addidx_;
bool created_;
bool remove_;
bool create_;
};
private:
friend class Handle;
Bucket *table_;
void acquire(Handle *h);
void release(Handle *h);
uptr calcHash(uptr addr);
};
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
map_ = map;
addr_ = addr;
remove_ = false;
create_ = true;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove) {
map_ = map;
addr_ = addr;
remove_ = remove;
create_ = true;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove, bool create) {
map_ = map;
addr_ = addr;
remove_ = remove;
create_ = create;
map_->acquire(this);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
map_->release(this);
}
template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
return cell_ != 0;
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
h->created_ = false;
h->addidx_ = -1U;
h->bucket_ = b;
h->cell_ = 0;
// If we want to remove the element, we need exclusive access to the bucket,
// so skip the lock-free phase.
if (h->remove_)
goto locked;
retry:
// First try to find an existing element w/o read mutex.
CHECK(!h->remove_);
// Check the embed cells.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 == addr) {
h->cell_ = c;
return;
}
}
// Check the add cells with read lock.
if (atomic_load(&b->add, memory_order_relaxed)) {
b->mtx.ReadLock();
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
h->addidx_ = i;
h->cell_ = c;
return;
}
}
b->mtx.ReadUnlock();
}
locked:
// Re-check existence under write lock.
// Embed cells.
b->mtx.Lock();
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
if (h->remove_) {
h->cell_ = c;
return;
}
b->mtx.Unlock();
goto retry;
}
}
// Add cells.
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
if (add) {
for (uptr i = 0; i < add->size; i++) {
Cell *c = &add->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == addr) {
if (h->remove_) {
h->addidx_ = i;
h->cell_ = c;
return;
}
b->mtx.Unlock();
goto retry;
}
}
}
// The element does not exist. Nothing to do if we came to remove it,
// and nothing to create if creation was not requested.
if (h->remove_ || !h->create_) {
b->mtx.Unlock();
return;
}
// Now try to create it under the mutex.
h->created_ = true;
// See if we have a free embed cell.
for (uptr i = 0; i < kBucketSize; i++) {
Cell *c = &b->cells[i];
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (addr1 == 0) {
h->cell_ = c;
return;
}
}
// Store in the add cells.
if (add == 0) {
// Allocate a new add array.
const uptr kInitSize = 64;
add = (AddBucket*)InternalAlloc(kInitSize);
internal_memset(add, 0, kInitSize);
add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
add->size = 0;
atomic_store(&b->add, (uptr)add, memory_order_relaxed);
}
if (add->size == add->cap) {
// Grow existing add array.
uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
uptr newsize = oldsize * 2;
AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
internal_memset(add1, 0, newsize);
add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
add1->size = add->size;
internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
InternalFree(add);
atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
add = add1;
}
// Store.
uptr i = add->size++;
Cell *c = &add->cells[i];
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
h->addidx_ = i;
h->cell_ = c;
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
if (h->cell_ == 0)
return;
Bucket *b = h->bucket_;
Cell *c = h->cell_;
uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
if (h->created_) {
// Denote completion of insertion.
CHECK_EQ(addr1, 0);
// After the following store, the element becomes available
// for lock-free reads.
atomic_store(&c->addr, h->addr_, memory_order_release);
b->mtx.Unlock();
} else if (h->remove_) {
// Denote that the cell is empty now.
CHECK_EQ(addr1, h->addr_);
atomic_store(&c->addr, 0, memory_order_release);
// See if we need to compact the bucket.
AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
if (h->addidx_ == -1U) {
// Removed from embed array, move an add element into the freed cell.
if (add && add->size != 0) {
uptr last = --add->size;
Cell *c1 = &add->cells[last];
c->val = c1->val;
uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
atomic_store(&c->addr, addr1, memory_order_release);
atomic_store(&c1->addr, 0, memory_order_release);
}
} else {
// Removed from add array, compact it.
uptr last = --add->size;
Cell *c1 = &add->cells[last];
if (c != c1) {
*c = *c1;
atomic_store(&c1->addr, 0, memory_order_relaxed);
}
}
if (add && add->size == 0) {
// FIXME(dvyukov): free add?
}
b->mtx.Unlock();
} else {
CHECK_EQ(addr1, h->addr_);
if (h->addidx_ != -1U)
b->mtx.ReadUnlock();
}
}
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
addr += addr << 10;
addr ^= addr >> 6;
return addr % kSize;
}
} // namespace __sanitizer
#endif // SANITIZER_ADDRHASHMAP_H
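
To make the Handle contract documented above concrete, here is a minimal usage
sketch; the element type, map size, and function names are hypothetical:

// Hypothetical client of AddrHashMap: remember the first PC that touched an
// address. AccessInfo is POD, as the map requires; 31051 is a prime.
struct AccessInfo {
  uptr first_pc;
};
typedef AddrHashMap<AccessInfo, 31051> AccessMap;

void RecordFirstAccess(AccessMap *m, uptr addr, uptr pc) {
  AccessMap::Handle h(m, addr);
  if (h.created())
    h->first_pc = pc;  // exclusive access: the cell was just inserted
  // For a pre-existing cell the handle only guarantees read access here.
}

void Forget(AccessMap *m, uptr addr) {
  AccessMap::Handle h(m, addr, /*remove*/ true);
  // The cell, if it existed, is reclaimed when h is destroyed.
}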


@ -17,7 +17,7 @@
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO)
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
@ -115,7 +115,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
if (allocated_end_ - allocated_current_ < (sptr)size) {
uptr size_to_allocate = Max(size, GetPageSizeCached());
allocated_current_ =
(char*)MmapOrDie(size_to_allocate, __FUNCTION__);
(char*)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
low_level_alloc_callback((uptr)allocated_current_,


@ -22,14 +22,13 @@ namespace __sanitizer {
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorSpace = 0;
static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
#if SANITIZER_WORDSIZE == 32
static const u64 kInternalAllocatorSize = (1ULL << 32);
static const uptr kInternalAllocatorRegionSizeLog = 20;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
static const u64 kInternalAllocatorSize = (1ULL << 47);
static const uptr kInternalAllocatorRegionSizeLog = 24;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
@ -46,10 +45,10 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
// LargeMmapAllocator.
struct CrashOnMapUnmap {
void OnMap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!\n");
}
void OnUnmap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!");
RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!\n");
}
};


@ -42,7 +42,8 @@ struct atomic_uint32_t {
struct atomic_uint64_t {
typedef u64 Type;
volatile Type val_dont_use;
// On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
volatile ALIGNED(8) Type val_dont_use;
};
struct atomic_uintptr_t {


@ -13,8 +13,26 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
#if defined(__i386__) || defined(__x86_64__)
# include "sanitizer_atomic_clang_x86.h"
#else
# include "sanitizer_atomic_clang_other.h"
#endif
namespace __sanitizer {
// We would like to just use compiler builtin atomic operations
// for loads and stores, but they are mostly broken in clang:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
INLINE void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
@ -23,59 +41,6 @@ INLINE void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
#endif
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME:
// 64-bit atomic operations are not atomic on 32-bit platforms.
// The implementation lacks necessary memory fences on ARM/PPC.
// We would like to use compiler builtin atomic operations,
// but they are mostly broken:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
atomic_signal_fence(memory_order_seq_cst);
v = a->val_dont_use;
atomic_signal_fence(memory_order_seq_cst);
}
return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
atomic_signal_fence(memory_order_seq_cst);
a->val_dont_use = v;
atomic_signal_fence(memory_order_seq_cst);
}
if (mo == memory_order_seq_cst)
atomic_thread_fence(memory_order_seq_cst);
}
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {


@ -0,0 +1,95 @@
//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H
namespace __sanitizer {
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else if (mo == memory_order_consume) {
// Assume that processor respects data dependencies
// (and that compiler won't break them).
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
} else if (mo == memory_order_acquire) {
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__sync_synchronize();
} else { // seq_cst
// E.g. on POWER we need a hw fence even before the load.
__sync_synchronize();
v = a->val_dont_use;
__sync_synchronize();
}
} else {
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
}
return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned stores are atomic.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else if (mo == memory_order_release) {
__sync_synchronize();
a->val_dont_use = v;
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
__sync_synchronize();
a->val_dont_use = v;
__sync_synchronize();
}
} else {
// 64-bit store on 32-bit platform.
// Gross, but simple and reliable.
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
if (cmp == cur)
break;
cmp = cur;
}
}
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
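
The compare-and-swap loop above is the standard idiom for emulating an atomic
64-bit store when the platform offers only a 64-bit CAS; here is a stand-alone
sketch using plain GCC __sync builtins and hypothetical names:

#include <stdint.h>

static volatile uint64_t g_value;  // assumed 8-byte aligned

static void Store64(uint64_t v) {
  uint64_t cmp = g_value;  // racy snapshot: just an initial guess
  for (;;) {
    // __sync_val_compare_and_swap returns the value that was in memory
    // when the CAS was attempted.
    uint64_t cur = __sync_val_compare_and_swap(&g_value, cmp, v);
    if (cur == cmp)
      break;    // the guess matched, so 'v' was written atomically
    cmp = cur;  // retry with the freshly observed value
  }
}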


@ -0,0 +1,114 @@
//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H
namespace __sanitizer {
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned loads are atomic.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else if (mo == memory_order_consume) {
// Assume that processor respects data dependencies
// (and that compiler won't break them).
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
} else if (mo == memory_order_acquire) {
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
// On x86 loads are implicitly acquire.
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
// On x86 a plain MOV is enough for a seq_cst load.
__asm__ __volatile__("" ::: "memory");
v = a->val_dont_use;
__asm__ __volatile__("" ::: "memory");
}
} else {
// 64-bit load on 32-bit platform.
__asm__ __volatile__(
"movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
"movq %%mm0, %0;" // (ptr could be read-only)
"emms;" // Empty mmx state/Reset FP regs
: "=m" (v)
: "m" (a->val_dont_use)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif // #ifdef __MMX__
"memory");
}
return v;
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
if (sizeof(*a) < 8 || sizeof(void*) == 8) {
// Assume that aligned stores are atomic.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else if (mo == memory_order_release) {
// On x86 stores are implicitly release.
__asm__ __volatile__("" ::: "memory");
a->val_dont_use = v;
__asm__ __volatile__("" ::: "memory");
} else { // seq_cst
// On x86 stores are implicitly release.
__asm__ __volatile__("" ::: "memory");
a->val_dont_use = v;
__sync_synchronize();
}
} else {
// 64-bit store on 32-bit platform.
__asm__ __volatile__(
"movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
"movq %%mm0, %0;"
"emms;" // Empty mmx state/Reset FP regs
: "=m" (a->val_dont_use)
: "m" (v)
: // mark the FP stack and mmx registers as clobbered
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif // #ifdef __MMX__
"memory");
if (mo == memory_order_seq_cst)
__sync_synchronize();
}
}
} // namespace __sanitizer
#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
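
As a usage note, proc_yield() is intended for spin-wait loops like the
hypothetical sketch below; on x86 it emits PAUSE, which reduces power and
pipeline penalties while spinning:

static atomic_uint32_t g_ready;

void WaitUntilReady() {
  while (atomic_load(&g_ready, memory_order_acquire) == 0)
    proc_yield(10);  // ten PAUSE instructions between polls
}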


@ -22,8 +22,20 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" short _InterlockedCompareExchange16( // NOLINT
short volatile *Destination, // NOLINT
short Exchange, short Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C"
long long _InterlockedCompareExchange64( // NOLINT
long long volatile *Destination, // NOLINT
long long Exchange, long long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange64)
#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64( // NOLINT
long long volatile * Addend, long long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd64)
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
@ -106,6 +118,40 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
}
INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
return (uptr)_InterlockedExchangeAdd64(
(volatile long long*)&a->val_dont_use, (long long)v); // NOLINT
#else
return (uptr)_InterlockedExchangeAdd(
(volatile long*)&a->val_dont_use, (long)v); // NOLINT
#endif
}
INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u32)_InterlockedExchangeAdd(
(volatile long*)&a->val_dont_use, -(long)v); // NOLINT
}
INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
return (uptr)_InterlockedExchangeAdd64(
(volatile long long*)&a->val_dont_use, -(long long)v); // NOLINT
#else
return (uptr)_InterlockedExchangeAdd(
(volatile long*)&a->val_dont_use, -(long)v); // NOLINT
#endif
}
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
@ -166,6 +212,45 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
u16 *cmp,
u16 xchg,
memory_order mo) {
u16 cmpv = *cmp;
u16 prev = (u16)_InterlockedCompareExchange16(
(volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
u32 *cmp,
u32 xchg,
memory_order mo) {
u32 cmpv = *cmp;
u32 prev = (u32)_InterlockedCompareExchange(
(volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
u64 *cmp,
u64 xchg,
memory_order mo) {
u64 cmpv = *cmp;
u64 prev = (u64)_InterlockedCompareExchange64(
(volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
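
A typical consumer of these overloads retries the operation with the value
written back into *cmp on failure; a hypothetical sketch:

// Hypothetical: atomically raise a 64-bit maximum using the CAS above.
INLINE void AtomicStoreMax(volatile atomic_uint64_t *a, u64 v) {
  u64 cmp = atomic_load(a, memory_order_relaxed);
  while (cmp < v &&
         !atomic_compare_exchange_strong(a, &cmp, v, memory_order_relaxed)) {
    // On failure 'cmp' holds the freshly observed value; the loop re-checks.
  }
}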
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,


@ -0,0 +1,349 @@
//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Specialized BitVector implementation.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_BITVECTOR_H
#define SANITIZER_BITVECTOR_H
#include "sanitizer_common.h"
namespace __sanitizer {
// Fixed size bit vector based on a single basic integer.
template <class basic_int_t = uptr>
class BasicBitVector {
public:
enum SizeEnum { kSize = sizeof(basic_int_t) * 8 };
uptr size() const { return kSize; }
// No CTOR.
void clear() { bits_ = 0; }
void setAll() { bits_ = ~(basic_int_t)0; }
bool empty() const { return bits_ == 0; }
// Returns true if the bit has changed from 0 to 1.
bool setBit(uptr idx) {
basic_int_t old = bits_;
bits_ |= mask(idx);
return bits_ != old;
}
// Returns true if the bit has changed from 1 to 0.
bool clearBit(uptr idx) {
basic_int_t old = bits_;
bits_ &= ~mask(idx);
return bits_ != old;
}
bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
uptr getAndClearFirstOne() {
CHECK(!empty());
uptr idx = LeastSignificantSetBitIndex(bits_);
clearBit(idx);
return idx;
}
// Do "this |= v" and return whether new bits have been added.
bool setUnion(const BasicBitVector &v) {
basic_int_t old = bits_;
bits_ |= v.bits_;
return bits_ != old;
}
// Do "this &= v" and return whether any bits have been removed.
bool setIntersection(const BasicBitVector &v) {
basic_int_t old = bits_;
bits_ &= v.bits_;
return bits_ != old;
}
// Do "this &= ~v" and return whether any bits have been removed.
bool setDifference(const BasicBitVector &v) {
basic_int_t old = bits_;
bits_ &= ~v.bits_;
return bits_ != old;
}
void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
// Returns true if 'this' intersects with 'v'.
bool intersectsWith(const BasicBitVector &v) const {
return (bits_ & v.bits_) != 0;
}
// for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
// uptr idx = it.next();
// use(idx);
// }
class Iterator {
public:
Iterator() { }
explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
bool hasNext() const { return !bv_.empty(); }
uptr next() { return bv_.getAndClearFirstOne(); }
void clear() { bv_.clear(); }
private:
BasicBitVector bv_;
};
private:
basic_int_t mask(uptr idx) const {
CHECK_LT(idx, size());
return (basic_int_t)1UL << idx;
}
basic_int_t bits_;
};
// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
// The implementation is optimized for better performance on
// sparse bit vectors, i.e. those with few set bits.
template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
class TwoLevelBitVector {
// This is essentially a 2-level bit vector.
// A set bit in the first-level BV indicates that there are set bits
// in the corresponding BV of the second level.
// This structure allows O(kLevel1Size) time for clear() and empty(),
// as well as fast handling of sparse BVs.
public:
enum SizeEnum { kSize = BV::kSize * BV::kSize * kLevel1Size };
// No CTOR.
uptr size() const { return kSize; }
void clear() {
for (uptr i = 0; i < kLevel1Size; i++)
l1_[i].clear();
}
void setAll() {
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
l1_[i0].setAll();
for (uptr i1 = 0; i1 < BV::kSize; i1++)
l2_[i0][i1].setAll();
}
}
bool empty() const {
for (uptr i = 0; i < kLevel1Size; i++)
if (!l1_[i].empty())
return false;
return true;
}
// Returns true if the bit has changed from 0 to 1.
bool setBit(uptr idx) {
check(idx);
uptr i0 = idx0(idx);
uptr i1 = idx1(idx);
uptr i2 = idx2(idx);
if (!l1_[i0].getBit(i1)) {
l1_[i0].setBit(i1);
l2_[i0][i1].clear();
}
bool res = l2_[i0][i1].setBit(i2);
// Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
// idx, i0, i1, i2, res);
return res;
}
bool clearBit(uptr idx) {
check(idx);
uptr i0 = idx0(idx);
uptr i1 = idx1(idx);
uptr i2 = idx2(idx);
bool res = false;
if (l1_[i0].getBit(i1)) {
res = l2_[i0][i1].clearBit(i2);
if (l2_[i0][i1].empty())
l1_[i0].clearBit(i1);
}
return res;
}
bool getBit(uptr idx) const {
check(idx);
uptr i0 = idx0(idx);
uptr i1 = idx1(idx);
uptr i2 = idx2(idx);
// Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
}
uptr getAndClearFirstOne() {
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
if (l1_[i0].empty()) continue;
uptr i1 = l1_[i0].getAndClearFirstOne();
uptr i2 = l2_[i0][i1].getAndClearFirstOne();
if (!l2_[i0][i1].empty())
l1_[i0].setBit(i1);
uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
// Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
return res;
}
CHECK(0);
return 0;
}
// Do "this |= v" and return whether new bits have been added.
bool setUnion(const TwoLevelBitVector &v) {
bool res = false;
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
BV t = v.l1_[i0];
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (l1_[i0].setBit(i1))
l2_[i0][i1].clear();
if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
res = true;
}
}
return res;
}
// Do "this &= v" and return whether any bits have been removed.
bool setIntersection(const TwoLevelBitVector &v) {
bool res = false;
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
if (l1_[i0].setIntersection(v.l1_[i0]))
res = true;
if (!l1_[i0].empty()) {
BV t = l1_[i0];
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
res = true;
if (l2_[i0][i1].empty())
l1_[i0].clearBit(i1);
}
}
}
return res;
}
// Do "this &= ~v" and return whether any bits have been removed.
bool setDifference(const TwoLevelBitVector &v) {
bool res = false;
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
BV t = l1_[i0];
t.setIntersection(v.l1_[i0]);
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
res = true;
if (l2_[i0][i1].empty())
l1_[i0].clearBit(i1);
}
}
return res;
}
void copyFrom(const TwoLevelBitVector &v) {
clear();
setUnion(v);
}
// Returns true if 'this' intersects with 'v'.
bool intersectsWith(const TwoLevelBitVector &v) const {
for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
BV t = l1_[i0];
t.setIntersection(v.l1_[i0]);
while (!t.empty()) {
uptr i1 = t.getAndClearFirstOne();
if (!v.l1_[i0].getBit(i1)) continue;
if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
return true;
}
}
return false;
}
// for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
// uptr idx = it.next();
// use(idx);
// }
class Iterator {
public:
Iterator() { }
explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
it1_.clear();
it2_.clear();
}
bool hasNext() const {
if (it1_.hasNext()) return true;
for (uptr i = i0_; i < kLevel1Size; i++)
if (!bv_.l1_[i].empty()) return true;
return false;
}
uptr next() {
// Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
// it2_.hasNext(), kSize);
if (!it1_.hasNext() && !it2_.hasNext()) {
for (; i0_ < kLevel1Size; i0_++) {
if (bv_.l1_[i0_].empty()) continue;
it1_ = typename BV::Iterator(bv_.l1_[i0_]);
// Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
// it2_.hasNext(), kSize);
break;
}
}
if (!it2_.hasNext()) {
CHECK(it1_.hasNext());
i1_ = it1_.next();
it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
// Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
// it2_.hasNext(), kSize);
}
CHECK(it2_.hasNext());
uptr i2 = it2_.next();
uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
// Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
// it1_.hasNext(), it2_.hasNext(), kSize, res);
if (!it1_.hasNext() && !it2_.hasNext())
i0_++;
return res;
}
private:
const TwoLevelBitVector &bv_;
uptr i0_, i1_;
typename BV::Iterator it1_, it2_;
};
private:
void check(uptr idx) const { CHECK_LT(idx, size()); }
uptr idx0(uptr idx) const {
uptr res = idx / (BV::kSize * BV::kSize);
CHECK_LT(res, kLevel1Size);
return res;
}
uptr idx1(uptr idx) const {
uptr res = (idx / BV::kSize) % BV::kSize;
CHECK_LT(res, BV::kSize);
return res;
}
uptr idx2(uptr idx) const {
uptr res = idx % BV::kSize;
CHECK_LT(res, BV::kSize);
return res;
}
BV l1_[kLevel1Size];
BV l2_[kLevel1Size][BV::kSize];
};
} // namespace __sanitizer
#endif // SANITIZER_BITVECTOR_H
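
A short sketch of the intended usage, with hypothetical indices (both must be
below kSize, which is BV::kSize squared times kLevel1Size):

void Demo() {
  TwoLevelBitVector<> bv;
  bv.clear();  // no CTOR: a bit vector must be cleared before first use
  bv.setBit(5);
  bv.setBit(300);
  for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
    uptr idx = it.next();  // yields 5, then 300
    (void)idx;
  }
}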


@ -0,0 +1,163 @@
//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// BVGraph -- a directed graph.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_BVGRAPH_H
#define SANITIZER_BVGRAPH_H
#include "sanitizer_common.h"
#include "sanitizer_bitvector.h"
namespace __sanitizer {
// Directed graph of fixed size implemented as an array of bit vectors.
// Not thread-safe; all accesses should be protected by an external lock.
template<class BV>
class BVGraph {
public:
enum SizeEnum { kSize = BV::kSize };
uptr size() const { return kSize; }
// No CTOR.
void clear() {
for (uptr i = 0; i < size(); i++)
v[i].clear();
}
bool empty() const {
for (uptr i = 0; i < size(); i++)
if (!v[i].empty())
return false;
return true;
}
// Returns true if a new edge was added.
bool addEdge(uptr from, uptr to) {
check(from, to);
return v[from].setBit(to);
}
// Adds an edge to 'to' from every node set in 'from'. Returns the number of
// newly added edges recorded in 'added_edges' (at most 'max_added_edges').
uptr addEdges(const BV &from, uptr to, uptr added_edges[],
uptr max_added_edges) {
uptr res = 0;
t1.copyFrom(from);
while (!t1.empty()) {
uptr node = t1.getAndClearFirstOne();
if (v[node].setBit(to))
if (res < max_added_edges)
added_edges[res++] = node;
}
return res;
}
// *EXPERIMENTAL*
// Returns true if an edge from=>to exists.
// This function does not use any global state except for 'this' itself,
// and thus can be called from different threads w/o locking.
// This would be racy.
// FIXME: investigate how much we can prove about this race being "benign".
bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }
// Returns true if the edge from=>to was removed.
bool removeEdge(uptr from, uptr to) {
return v[from].clearBit(to);
}
// Returns true if at least one edge *=>to was removed.
bool removeEdgesTo(const BV &to) {
bool res = false;
for (uptr from = 0; from < size(); from++) {
if (v[from].setDifference(to))
res = true;
}
return res;
}
// Returns true if at least one edge from=>* was removed.
bool removeEdgesFrom(const BV &from) {
bool res = false;
t1.copyFrom(from);
while (!t1.empty()) {
uptr idx = t1.getAndClearFirstOne();
if (!v[idx].empty()) {
v[idx].clear();
res = true;
}
}
return res;
}
void removeEdgesFrom(uptr from) {
v[from].clear();
}
bool hasEdge(uptr from, uptr to) const {
check(from, to);
return v[from].getBit(to);
}
// Returns true if there is a path from the node 'from'
// to any of the nodes in 'targets'.
bool isReachable(uptr from, const BV &targets) {
BV &to_visit = t1,
&visited = t2;
to_visit.copyFrom(v[from]);
visited.clear();
visited.setBit(from);
while (!to_visit.empty()) {
uptr idx = to_visit.getAndClearFirstOne();
if (visited.setBit(idx))
to_visit.setUnion(v[idx]);
}
return targets.intersectsWith(visited);
}
// Finds a path from 'from' to one of the nodes in 'targets',
// stores up to 'path_size' items of the path into 'path',
// returns the path length, or 0 if there is no path of length <= 'path_size'.
uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
if (path_size == 0)
return 0;
path[0] = from;
if (targets.getBit(from))
return 1;
// The function is recursive, so we don't want to create a BV on the stack.
// Instead of a getAndClearFirstOne loop we use the slower iterator.
for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
uptr idx = it.next();
if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
return res + 1;
}
return 0;
}
// Same as findPath, but finds a shortest path.
uptr findShortestPath(uptr from, const BV &targets, uptr *path,
uptr path_size) {
for (uptr p = 1; p <= path_size; p++)
if (findPath(from, targets, path, p) == p)
return p;
return 0;
}
private:
void check(uptr idx1, uptr idx2) const {
CHECK_LT(idx1, size());
CHECK_LT(idx2, size());
}
BV v[kSize];
// Keep temporary vectors here since we cannot create large objects on the stack.
BV t1, t2;
};
} // namespace __sanitizer
#endif // SANITIZER_BVGRAPH_H
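
The reachability queries above are exactly what a lock-order (deadlock)
detector needs; a hypothetical sketch of that use, with the graph and the
scratch vector kept static because they are too large for the stack:

typedef TwoLevelBitVector<> LockBV;
static BVGraph<LockBV> g_lock_graph;  // static zero-init leaves it empty
static LockBV g_targets;

// Would adding the edge from=>to close a cycle in the lock-order graph?
bool WouldDeadlock(uptr from, uptr to) {
  g_targets.clear();
  g_targets.setBit(from);
  if (g_lock_graph.isReachable(to, g_targets))
    return true;  // 'to' already reaches 'from': the new edge closes a cycle
  g_lock_graph.addEdge(from, to);
  return false;
}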


@ -91,7 +91,7 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
if (internal_iserror(openrv)) return 0;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
*buff = (char*)MmapOrDie(size, __FUNCTION__);
*buff = (char*)MmapOrDie(size, __func__);
*buff_size = size;
// Read up to one page at a time.
read_len = 0;
@ -200,11 +200,11 @@ void ReportErrorSummary(const char *error_type, StackTrace *stack) {
return;
AddressInfo ai;
#if !SANITIZER_GO
if (stack->size > 0 && Symbolizer::Get()->IsAvailable()) {
if (stack->size > 0 && Symbolizer::Get()->CanReturnFileLineInfo()) {
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
Symbolizer::Get()->SymbolizeCode(pc, &ai, 1);
Symbolizer::Get()->SymbolizePC(pc, &ai, 1);
}
#endif
ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
@ -242,6 +242,30 @@ char *StripModuleName(const char *module) {
return internal_strdup(short_module_name);
}
static atomic_uintptr_t g_total_mmaped;
void IncreaseTotalMmap(uptr size) {
if (!common_flags()->mmap_limit_mb) return;
uptr total_mmaped =
atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) {
// Since for now mmap_limit_mb is not a user-facing flag, just CHECK.
uptr mmap_limit_mb = common_flags()->mmap_limit_mb;
common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK.
RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
}
}
void DecreaseTotalMmap(uptr size) {
if (!common_flags()->mmap_limit_mb) return;
atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
}
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
}
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
@ -274,9 +298,11 @@ void __sanitizer_set_report_path(const char *path) {
}
}
void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
(void)reserved;
PrepareForSandboxing();
void NOINLINE
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
PrepareForSandboxing(args);
if (sandboxing_callback)
sandboxing_callback();
}
void __sanitizer_report_error_summary(const char *error_summary) {


@ -17,6 +17,7 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_flags.h"
namespace __sanitizer {
struct StackTrace;
@ -25,14 +26,12 @@ struct StackTrace;
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
#if defined(__powerpc__) || defined(__powerpc64__)
const uptr kCacheLineSize = 128;
#else
const uptr kCacheLineSize = 64;
#endif
const uptr kMaxPathLength = 512;
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
extern const char *SanitizerToolName; // Can be changed by the tool.
uptr GetPageSize();
@ -51,6 +50,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
@ -58,6 +58,8 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
@ -123,9 +125,18 @@ void RawWrite(const char *buffer);
bool PrintsToTty();
// Caching version of PrintsToTty(). Not thread-safe.
bool PrintsToTtyCached();
bool ColorizeReports();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \
do { \
if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
} while (0)
#define VPrintf(level, ...) \
do { \
if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
} while (0)
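
A hypothetical call site, to show the intent: the message is emitted only when
the runtime runs with verbosity at or above the given level.

void MapShadow(uptr beg, uptr end) {
  VReport(1, "Mapped shadow region: %p-%p\n", (void *)beg, (void *)end);
}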
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
@ -169,7 +180,10 @@ u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
void PrepareForSandboxing();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());
void InitTlsSize();
uptr GetTlsSize();
@ -206,6 +220,14 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
@ -243,6 +265,19 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
return up;
}
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up; // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
up = __builtin_ctzl(x);
#elif defined(_WIN64)
_BitScanForward64(&up, x);
#else
_BitScanForward(&up, x);
#endif
return up;
}
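
Two worked values, as hypothetical sanity checks:

void LsbIndexChecks() {
  CHECK_EQ(LeastSignificantSetBitIndex(1), 0);     // bit 0 is the lowest
  CHECK_EQ(LeastSignificantSetBitIndex(0x28), 3);  // 0x28 == 0b101000
}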
INLINE bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
@ -307,12 +342,6 @@ INLINE int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
#endif
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
@ -477,6 +506,33 @@ const uptr kPthreadDestructorIterations = 0;
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !defined(SANITIZER_GO)
extern uptr indirect_call_wrapper;
void SetIndirectCallWrapper(uptr wrapper);
template <typename F>
F IndirectExternCall(F f) {
typedef F (*WrapF)(F);
return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f;
}
#else
INLINE void SetIndirectCallWrapper(uptr wrapper) {}
template <typename F>
F IndirectExternCall(F f) {
return f;
}
#endif
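
A hypothetical example of the wrapper in action: an interceptor forwarding to
the real libc function through the registered indirection:

typedef int (*open_f)(const char *path, int flags);

int CallRealOpen(open_f real_open, const char *path, int flags) {
  // If a wrapper was registered via SetIndirectCallWrapper, the call is
  // routed through it; otherwise 'real_open' is invoked directly.
  return IndirectExternCall(real_open)(path, flags);
}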
#if SANITIZER_ANDROID
void AndroidLogWrite(const char *buffer);
void GetExtraActivationFlags(char *buf, uptr size);
void SanitizerInitializeUnwinder();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
#endif
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,

File diff suppressed because it is too large.


@ -0,0 +1,554 @@
//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Scanf/printf implementation for use in *Sanitizer interceptors.
// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
// with a few common GNU extensions.
//
//===----------------------------------------------------------------------===//
#include <stdarg.h>
static const char *parse_number(const char *p, int *out) {
*out = internal_atoll(p);
while (*p >= '0' && *p <= '9')
++p;
return p;
}
static const char *maybe_parse_param_index(const char *p, int *out) {
// n$
if (*p >= '0' && *p <= '9') {
int number;
const char *q = parse_number(p, &number);
CHECK(q);
if (*q == '$') {
*out = number;
p = q + 1;
}
}
// Otherwise, do not change p. This will be re-parsed later as the field
// width.
return p;
}
static bool char_is_one_of(char c, const char *s) {
return !!internal_strchr(s, c);
}
static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
if (char_is_one_of(*p, "jztLq")) {
ll[0] = *p;
++p;
} else if (*p == 'h') {
ll[0] = 'h';
++p;
if (*p == 'h') {
ll[1] = 'h';
++p;
}
} else if (*p == 'l') {
ll[0] = 'l';
++p;
if (*p == 'l') {
ll[1] = 'l';
++p;
}
}
return p;
}
// Returns true if the character is an integer conversion specifier.
static bool format_is_integer_conv(char c) {
return char_is_one_of(c, "diouxXn");
}
// Returns true if the character is a floating-point conversion specifier.
static bool format_is_float_conv(char c) {
return char_is_one_of(c, "aAeEfFgG");
}
// Returns string output character size for string-like conversions,
// or 0 if the conversion is invalid.
static int format_get_char_size(char convSpecifier,
const char lengthModifier[2]) {
if (char_is_one_of(convSpecifier, "CS")) {
return sizeof(wchar_t);
}
if (char_is_one_of(convSpecifier, "cs[")) {
if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
return sizeof(wchar_t);
else if (lengthModifier[0] == '\0')
return sizeof(char);
}
return 0;
}
enum FormatStoreSize {
// Store size not known in advance; can be calculated as wcslen() of the
// destination buffer.
FSS_WCSLEN = -2,
// Store size not known in advance; can be calculated as strlen() of the
// destination buffer.
FSS_STRLEN = -1,
// Invalid conversion specifier.
FSS_INVALID = 0
};
// Returns the memory size of a format directive (if >0), or a value of
// FormatStoreSize.
static int format_get_value_size(char convSpecifier,
const char lengthModifier[2],
bool promote_float) {
if (format_is_integer_conv(convSpecifier)) {
switch (lengthModifier[0]) {
case 'h':
return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
case 'l':
return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
case 'q':
return sizeof(long long);
case 'L':
return sizeof(long long);
case 'j':
return sizeof(INTMAX_T);
case 'z':
return sizeof(SIZE_T);
case 't':
return sizeof(PTRDIFF_T);
case 0:
return sizeof(int);
default:
return FSS_INVALID;
}
}
if (format_is_float_conv(convSpecifier)) {
switch (lengthModifier[0]) {
case 'L':
case 'q':
return sizeof(long double);
case 'l':
return lengthModifier[1] == 'l' ? sizeof(long double)
: sizeof(double);
case 0:
// Printf promotes floats to doubles but scanf does not
return promote_float ? sizeof(double) : sizeof(float);
default:
return FSS_INVALID;
}
}
if (convSpecifier == 'p') {
if (lengthModifier[0] != 0)
return FSS_INVALID;
return sizeof(void *);
}
return FSS_INVALID;
}
struct ScanfDirective {
int argIdx; // argument index, or -1 if not specified ("%n$")
int fieldWidth;
const char *begin;
const char *end;
bool suppressed; // suppress assignment ("*")
bool allocate; // allocate space ("m")
char lengthModifier[2];
char convSpecifier;
bool maybeGnuMalloc;
};
// Parse scanf format string. If a valid directive is encountered, it is
// returned in dir. This function returns a pointer to the first
// unprocessed character, or 0 in case of error.
// At the end of the string, a pointer to the closing \0 is returned.
static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
ScanfDirective *dir) {
internal_memset(dir, 0, sizeof(*dir));
dir->argIdx = -1;
while (*p) {
if (*p != '%') {
++p;
continue;
}
dir->begin = p;
++p;
// %%
if (*p == '%') {
++p;
continue;
}
if (*p == '\0') {
return 0;
}
// %n$
p = maybe_parse_param_index(p, &dir->argIdx);
CHECK(p);
// *
if (*p == '*') {
dir->suppressed = true;
++p;
}
// Field width
if (*p >= '0' && *p <= '9') {
p = parse_number(p, &dir->fieldWidth);
CHECK(p);
if (dir->fieldWidth <= 0) // A width, if specified, must be non-zero.
return 0;
}
// m
if (*p == 'm') {
dir->allocate = true;
++p;
}
// Length modifier.
p = maybe_parse_length_modifier(p, dir->lengthModifier);
// Conversion specifier.
dir->convSpecifier = *p++;
// Consume %[...] expression.
if (dir->convSpecifier == '[') {
if (*p == '^')
++p;
if (*p == ']')
++p;
while (*p && *p != ']')
++p;
if (*p == 0)
return 0; // unexpected end of string
// Consume the closing ']'.
++p;
}
// This is unfortunately ambiguous between the old GNU extension
// of %as, %aS and %a[...] and the newer POSIX %a followed by
// the letters s, S or [.
if (allowGnuMalloc && dir->convSpecifier == 'a' &&
!dir->lengthModifier[0]) {
if (*p == 's' || *p == 'S') {
dir->maybeGnuMalloc = true;
++p;
} else if (*p == '[') {
// Watch for %a[h-j%d]: if % appears inside the
// [...] range, we have to give up, since we can't tell
// whether scanf will parse it as POSIX %a followed by [h-j %d ], or as a
// GNU allocation of a string matching the range h-j plus the chars % and d.
const char *q = p + 1;
if (*q == '^')
++q;
if (*q == ']')
++q;
while (*q && *q != ']' && *q != '%')
++q;
if (*q == 0 || *q == '%')
return 0;
p = q + 1; // Consume the closing ']'.
dir->maybeGnuMalloc = true;
}
}
dir->end = p;
break;
}
return p;
}
static int scanf_get_value_size(ScanfDirective *dir) {
if (dir->allocate) {
if (!char_is_one_of(dir->convSpecifier, "cCsS["))
return FSS_INVALID;
return sizeof(char *);
}
if (dir->maybeGnuMalloc) {
if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
return FSS_INVALID;
// This is ambiguous, so use the smaller of sizeof(char *) (if it is
// a GNU extension of %as, %aS or %a[...]) and sizeof(float) (if it is
// POSIX %a followed by the letters s, S or [).
return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
}
if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
unsigned charSize =
format_get_char_size(dir->convSpecifier, dir->lengthModifier);
if (charSize == 0)
return FSS_INVALID;
if (dir->fieldWidth == 0) {
if (!needsTerminator)
return charSize;
return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
}
return (dir->fieldWidth + needsTerminator) * charSize;
}
return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
}
// Common part of *scanf interceptors.
// Process format string and va_list, and report all store ranges.
// Stops when "consuming" n_inputs input items.
static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
const char *format, va_list aq) {
CHECK_GT(n_inputs, 0);
const char *p = format;
COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
while (*p) {
ScanfDirective dir;
p = scanf_parse_next(p, allowGnuMalloc, &dir);
if (!p)
break;
if (dir.convSpecifier == 0) {
// This can only happen at the end of the format string.
CHECK_EQ(*p, 0);
break;
}
// Here the directive is valid. Do what it says.
if (dir.argIdx != -1) {
// Unsupported.
break;
}
if (dir.suppressed)
continue;
int size = scanf_get_value_size(&dir);
if (size == FSS_INVALID) {
Report("WARNING: unexpected format specifier in scanf interceptor: "
"%.*s\n", dir.end - dir.begin, dir.begin);
break;
}
void *argp = va_arg(aq, void *);
if (dir.convSpecifier != 'n')
--n_inputs;
if (n_inputs < 0)
break;
if (size == FSS_STRLEN) {
size = internal_strlen((const char *)argp) + 1;
} else if (size == FSS_WCSLEN) {
// FIXME: actually use wcslen() to calculate it.
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
}
}
#if SANITIZER_INTERCEPT_PRINTF
struct PrintfDirective {
int fieldWidth;
int fieldPrecision;
int argIdx; // width argument index, or -1 if not specified ("%*n$")
int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
const char *begin;
const char *end;
bool starredWidth;
bool starredPrecision;
char lengthModifier[2];
char convSpecifier;
};
static const char *maybe_parse_number(const char *p, int *out) {
if (*p >= '0' && *p <= '9')
p = parse_number(p, out);
return p;
}
static const char *maybe_parse_number_or_star(const char *p, int *out,
bool *star) {
if (*p == '*') {
*star = true;
++p;
} else {
*star = false;
p = maybe_parse_number(p, out);
}
return p;
}
// Parse printf format string. Same as scanf_parse_next.
static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
internal_memset(dir, 0, sizeof(*dir));
dir->argIdx = -1;
dir->precisionIdx = -1;
while (*p) {
if (*p != '%') {
++p;
continue;
}
dir->begin = p;
++p;
// %%
if (*p == '%') {
++p;
continue;
}
if (*p == '\0') {
return 0;
}
// %n$
p = maybe_parse_param_index(p, &dir->argIdx);
CHECK(p);
// Flags
while (char_is_one_of(*p, "'-+ #0")) {
++p;
}
// Field width
p = maybe_parse_number_or_star(p, &dir->fieldWidth,
&dir->starredWidth);
if (!p)
return 0;
// Precision
if (*p == '.') {
++p;
// Actual precision is optional (surprise!)
p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
&dir->starredPrecision);
if (!p)
return 0;
// m$
if (dir->starredPrecision) {
p = maybe_parse_param_index(p, &dir->precisionIdx);
CHECK(p);
}
}
// Length modifier.
p = maybe_parse_length_modifier(p, dir->lengthModifier);
// Conversion specifier.
dir->convSpecifier = *p++;
dir->end = p;
break;
}
return p;
}
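// Illustrative self-check of the parser above (not upstream code; assumes
// the CHECK_* macros available in this file). For "%-08.5ld", '-' and '0'
// are consumed as flags, fieldWidth becomes 8, fieldPrecision 5,
// lengthModifier "l" and convSpecifier 'd'; for "%*.*s" both starredWidth
// and starredPrecision would be set instead.
static void printf_parse_next_example() {
  PrintfDirective dir;
  const char *rest = printf_parse_next("%-08.5ld after", &dir);
  CHECK_EQ(dir.fieldWidth, 8);
  CHECK_EQ(dir.fieldPrecision, 5);
  CHECK_EQ(dir.lengthModifier[0], 'l');
  CHECK_EQ(dir.convSpecifier, 'd');
  CHECK_EQ(*rest, ' ');  // parsing stopped right past the directive
}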
static int printf_get_value_size(PrintfDirective *dir) {
if (dir->convSpecifier == 'm') {
return sizeof(char *);
}
if (char_is_one_of(dir->convSpecifier, "cCsS")) {
unsigned charSize =
format_get_char_size(dir->convSpecifier, dir->lengthModifier);
if (charSize == 0)
return FSS_INVALID;
if (char_is_one_of(dir->convSpecifier, "sS")) {
return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
}
return charSize;
}
return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
}
#define SKIP_SCALAR_ARG(aq, convSpecifier, size) \
do { \
if (format_is_float_conv(convSpecifier)) { \
switch (size) { \
case 8: \
va_arg(*aq, double); \
break; \
case 16: \
va_arg(*aq, long double); \
break; \
default: \
Report("WARNING: unexpected floating-point arg size" \
" in printf interceptor: %d\n", size); \
return; \
} \
} else { \
switch (size) { \
case 1: \
case 2: \
case 4: \
va_arg(*aq, u32); \
break; \
case 8: \
va_arg(*aq, u64); \
break; \
default: \
Report("WARNING: unexpected arg size" \
" in printf interceptor: %d\n", size); \
return; \
} \
} \
} while (0)
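// The 1/2/4 -> u32 and float -> double cases above follow from C's default
// argument promotions: in a variadic call char and short arrive as int, and
// float arrives as double, so that is the unit va_arg must consume. A
// standalone illustration (stdarg is already in scope in this file):
static long long SumPromotedDemo(int n, ...) {
  va_list ap;
  va_start(ap, n);
  long long s = 0;
  for (int i = 0; i < n; i++)
    s += va_arg(ap, int);  // a char or short argument occupies an int slot
  va_end(ap);
  return s;
}
// SumPromotedDemo(3, (char)1, (short)2, 3) == 6.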
// Common part of *printf interceptors.
// Process format string and va_list, and report all load ranges.
static void printf_common(void *ctx, const char *format, va_list aq) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
const char *p = format;
while (*p) {
PrintfDirective dir;
p = printf_parse_next(p, &dir);
if (!p)
break;
if (dir.convSpecifier == 0) {
// This can only happen at the end of the format string.
CHECK_EQ(*p, 0);
break;
}
// Here the directive is valid. Do what it says.
if (dir.argIdx != -1 || dir.precisionIdx != -1) {
// Unsupported.
break;
}
if (dir.starredWidth) {
// Dynamic width
SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
}
if (dir.starredPrecision) {
// Dynamic precision
SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
}
int size = printf_get_value_size(&dir);
if (size == FSS_INVALID) {
Report("WARNING: unexpected format specifier in printf "
"interceptor: %.*s\n", dir.end - dir.begin, dir.begin);
break;
}
if (dir.convSpecifier == 'n') {
void *argp = va_arg(aq, void *);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
continue;
} else if (size == FSS_STRLEN) {
if (void *argp = va_arg(aq, void *)) {
if (dir.starredPrecision) {
// FIXME: properly support starred precision for strings.
size = 0;
} else if (dir.fieldPrecision > 0) {
// Won't read more than "precision" symbols.
size = internal_strnlen((const char *)argp, dir.fieldPrecision);
if (size < dir.fieldPrecision) size++;
} else {
// Whole string will be accessed.
size = internal_strlen((const char *)argp) + 1;
}
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
}
} else if (size == FSS_WCSLEN) {
if (void *argp = va_arg(aq, void *)) {
// FIXME: Properly support wide-character strings (via wcsrtombs).
size = 0;
COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
}
} else {
// Skip non-pointer args
SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
}
}
}
#endif // SANITIZER_INTERCEPT_PRINTF
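// Illustration of the "%.Ns" read range computed above, as a hypothetical
// helper (internal_strnlen is the same routine printf_common uses): with
// precision 3, printf reads at most 3 bytes of the argument, plus the NUL
// only if the string ends earlier.
static int PrintfSReadSizeDemo(const char *s, int precision) {
  int n = internal_strnlen(s, precision);
  return n < precision ? n + 1 : n;  // count the NUL only if it was reached
}
// PrintfSReadSizeDemo("abcdef", 3) == 3 (byte 4 must not be touched);
// PrintfSReadSizeDemo("ab", 3) == 3 ("ab" plus its NUL).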

View File

@ -12,14 +12,18 @@
struct ioctl_desc {
unsigned req;
// FIXME: support read+write arguments. Those are currently marked as WRITE.
// FIXME: support read+write arguments. Currently READWRITE and WRITE do the
// same thing.
// XXX: The declarations below may use WRITE instead of READWRITE, unless
// explicitly noted.
enum {
NONE,
READ,
WRITE,
READWRITE,
CUSTOM
} type : 2;
unsigned size : 30;
} type : 3;
unsigned size : 29;
const char* name;
};
@ -487,11 +491,15 @@ static void ioctl_init() {
// Handle the most evil ioctls that encode argument value as part of request id.
static unsigned ioctl_request_fixup(unsigned req) {
#if SANITIZER_LINUX
if ((req & ~0x3fff001fU) == IOCTL_EVIOCGBIT)
// Strip size and event number.
const unsigned kEviocgbitMask =
(IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
return IOCTL_EVIOCGBIT;
if ((req & ~0x3fU) == IOCTL_EVIOCGABS)
// Strip absolute axis number.
if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
return IOCTL_EVIOCGABS;
if ((req & ~0x3fU) == IOCTL_EVIOCSABS)
if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
return IOCTL_EVIOCSABS;
#endif
return req;
@ -513,24 +521,56 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) {
return 0;
}
static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
CHECK(desc);
desc->req = req;
desc->name = "<DECODED_IOCTL>";
desc->size = IOC_SIZE(req);
// Sanity check.
if (desc->size > 1024) return false;
unsigned dir = IOC_DIR(req);
switch (dir) {
case IOC_NONE:
desc->type = ioctl_desc::NONE;
break;
case IOC_READ | IOC_WRITE:
desc->type = ioctl_desc::READWRITE;
break;
case IOC_READ:
desc->type = ioctl_desc::WRITE;
break;
case IOC_WRITE:
desc->type = ioctl_desc::READ;
break;
default:
return false;
}
if (desc->type != IOC_NONE && desc->size == 0) return false;
char id = IOC_TYPE(req);
// Sanity check.
if (!(id >= 'a' && id <= 'z') && !(id >= 'A' && id <= 'Z')) return false;
return true;
}
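// Worked example for the decoder above (illustrative; assumes the Linux
// asm-generic IOC_*SHIFT helpers defined alongside IOC_SIZESHIFT, and a
// hypothetical device request equivalent to _IOR('q', 1, int)). _IOR means
// the kernel writes into user memory, so from the sanitizer's viewpoint the
// argument is written.
static bool IoctlDecodeDemo() {
  unsigned req = (IOC_READ << IOC_DIRSHIFT) | (sizeof(int) << IOC_SIZESHIFT) |
                 ('q' << IOC_TYPESHIFT) | (1 << IOC_NRSHIFT);
  ioctl_desc desc;
  if (!ioctl_decode(req, &desc)) return false;
  return desc.type == ioctl_desc::WRITE && desc.size == sizeof(int);
}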
static const ioctl_desc *ioctl_lookup(unsigned req) {
req = ioctl_request_fixup(req);
const ioctl_desc *desc = ioctl_table_lookup(req);
if (desc) return desc;
// Try stripping access size from the request id.
desc = ioctl_table_lookup(req & ~0x3fff0000U);
desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
// Sanity check: requests that encode access size are either read or write and
// have size of 0 in the table.
if (desc && desc->size == 0 &&
(desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READ))
(desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
desc->type == ioctl_desc::READ))
return desc;
return 0;
}
static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
unsigned request, void *arg) {
if (desc->type == ioctl_desc::READ) {
if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
}
@ -548,7 +588,7 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
unsigned request, void *arg) {
if (desc->type == ioctl_desc::WRITE) {
if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
// FIXME: add verbose output
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);

View File

@ -1,309 +0,0 @@
//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Scanf implementation for use in *Sanitizer interceptors.
// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
// with a few common GNU extensions.
//
//===----------------------------------------------------------------------===//
#include <stdarg.h>
struct ScanfDirective {
int argIdx; // argument index, or -1 if not specified ("%n$")
int fieldWidth;
bool suppressed; // suppress assignment ("*")
bool allocate; // allocate space ("m")
char lengthModifier[2];
char convSpecifier;
bool maybeGnuMalloc;
};
static const char *parse_number(const char *p, int *out) {
*out = internal_atoll(p);
while (*p >= '0' && *p <= '9')
++p;
return p;
}
static bool char_is_one_of(char c, const char *s) {
return !!internal_strchr(s, c);
}
// Parse scanf format string. If a valid directive is encountered, it is
// returned in dir. This function returns the pointer to the first
// unprocessed character, or 0 in case of error.
// In case of the end-of-string, a pointer to the closing \0 is returned.
static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
ScanfDirective *dir) {
internal_memset(dir, 0, sizeof(*dir));
dir->argIdx = -1;
while (*p) {
if (*p != '%') {
++p;
continue;
}
++p;
// %%
if (*p == '%') {
++p;
continue;
}
if (*p == '\0') {
return 0;
}
// %n$
if (*p >= '0' && *p <= '9') {
int number;
const char *q = parse_number(p, &number);
if (*q == '$') {
dir->argIdx = number;
p = q + 1;
}
// Otherwise, do not change p. This will be re-parsed later as the field
// width.
}
// *
if (*p == '*') {
dir->suppressed = true;
++p;
}
// Field width.
if (*p >= '0' && *p <= '9') {
p = parse_number(p, &dir->fieldWidth);
if (dir->fieldWidth <= 0)
return 0;
}
// m
if (*p == 'm') {
dir->allocate = true;
++p;
}
// Length modifier.
if (char_is_one_of(*p, "jztLq")) {
dir->lengthModifier[0] = *p;
++p;
} else if (*p == 'h') {
dir->lengthModifier[0] = 'h';
++p;
if (*p == 'h') {
dir->lengthModifier[1] = 'h';
++p;
}
} else if (*p == 'l') {
dir->lengthModifier[0] = 'l';
++p;
if (*p == 'l') {
dir->lengthModifier[1] = 'l';
++p;
}
}
// Conversion specifier.
dir->convSpecifier = *p++;
// Consume %[...] expression.
if (dir->convSpecifier == '[') {
if (*p == '^')
++p;
if (*p == ']')
++p;
while (*p && *p != ']')
++p;
if (*p == 0)
return 0; // unexpected end of string
// Consume the closing ']'.
++p;
}
// This is unfortunately ambiguous between old GNU extension
// of %as, %aS and %a[...] and newer POSIX %a followed by
// letters s, S or [.
if (allowGnuMalloc && dir->convSpecifier == 'a' &&
!dir->lengthModifier[0]) {
if (*p == 's' || *p == 'S') {
dir->maybeGnuMalloc = true;
++p;
} else if (*p == '[') {
// Watch for %a[h-j%d], if % appears in the
// [...] range, then we need to give up, we don't know
// if scanf will parse it as POSIX %a [h-j %d ] or
// GNU allocation of a string over the set h-j plus the % and d chars.
const char *q = p + 1;
if (*q == '^')
++q;
if (*q == ']')
++q;
while (*q && *q != ']' && *q != '%')
++q;
if (*q == 0 || *q == '%')
return 0;
p = q + 1; // Consume the closing ']'.
dir->maybeGnuMalloc = true;
}
}
break;
}
return p;
}
// Returns true if the character is an integer conversion specifier.
static bool scanf_is_integer_conv(char c) {
return char_is_one_of(c, "diouxXn");
}
// Returns true if the character is a floating-point conversion specifier.
static bool scanf_is_float_conv(char c) {
return char_is_one_of(c, "aAeEfFgG");
}
// Returns string output character size for string-like conversions,
// or 0 if the conversion is invalid.
static int scanf_get_char_size(ScanfDirective *dir) {
if (char_is_one_of(dir->convSpecifier, "CS")) {
// wchar_t
return 0;
}
if (char_is_one_of(dir->convSpecifier, "cs[")) {
if (dir->lengthModifier[0] == 'l')
// wchar_t
return 0;
else if (dir->lengthModifier[0] == 0)
return sizeof(char);
else
return 0;
}
return 0;
}
enum ScanfStoreSize {
// Store size not known in advance; can be calculated as strlen() of the
// destination buffer.
SSS_STRLEN = -1,
// Invalid conversion specifier.
SSS_INVALID = 0
};
// Returns the store size of a scanf directive (if >0), or a value of
// ScanfStoreSize.
static int scanf_get_store_size(ScanfDirective *dir) {
if (dir->allocate) {
if (!char_is_one_of(dir->convSpecifier, "cCsS["))
return SSS_INVALID;
return sizeof(char *);
}
if (dir->maybeGnuMalloc) {
if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
return SSS_INVALID;
// This is ambiguous, so use the smaller of sizeof(char *) (if it is the
// GNU extension %as, %aS or %a[...]) and sizeof(float) (if it is POSIX
// %a followed by the letters s, S or [).
return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
}
if (scanf_is_integer_conv(dir->convSpecifier)) {
switch (dir->lengthModifier[0]) {
case 'h':
return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
case 'l':
return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
case 'L':
return sizeof(long long);
case 'j':
return sizeof(INTMAX_T);
case 'z':
return sizeof(SIZE_T);
case 't':
return sizeof(PTRDIFF_T);
case 0:
return sizeof(int);
default:
return SSS_INVALID;
}
}
if (scanf_is_float_conv(dir->convSpecifier)) {
switch (dir->lengthModifier[0]) {
case 'L':
case 'q':
return sizeof(long double);
case 'l':
return dir->lengthModifier[1] == 'l' ? sizeof(long double)
: sizeof(double);
case 0:
return sizeof(float);
default:
return SSS_INVALID;
}
}
if (char_is_one_of(dir->convSpecifier, "sS[")) {
unsigned charSize = scanf_get_char_size(dir);
if (charSize == 0)
return SSS_INVALID;
if (dir->fieldWidth == 0)
return SSS_STRLEN;
return (dir->fieldWidth + 1) * charSize;
}
if (char_is_one_of(dir->convSpecifier, "cC")) {
unsigned charSize = scanf_get_char_size(dir);
if (charSize == 0)
return SSS_INVALID;
if (dir->fieldWidth == 0)
return charSize;
return dir->fieldWidth * charSize;
}
if (dir->convSpecifier == 'p') {
if (dir->lengthModifier[1] != 0)
return SSS_INVALID;
return sizeof(void *);
}
return SSS_INVALID;
}
// Common part of *scanf interceptors.
// Process format string and va_list, and report all store ranges.
// Stops when "consuming" n_inputs input items.
static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
const char *format, va_list aq) {
CHECK_GT(n_inputs, 0);
const char *p = format;
while (*p) {
ScanfDirective dir;
p = scanf_parse_next(p, allowGnuMalloc, &dir);
if (!p)
break;
if (dir.convSpecifier == 0) {
// This can only happen at the end of the format string.
CHECK_EQ(*p, 0);
break;
}
// Here the directive is valid. Do what it says.
if (dir.argIdx != -1) {
// Unsupported.
break;
}
if (dir.suppressed)
continue;
int size = scanf_get_store_size(&dir);
if (size == SSS_INVALID)
break;
void *argp = va_arg(aq, void *);
if (dir.convSpecifier != 'n')
--n_inputs;
if (n_inputs < 0)
break;
if (size == SSS_STRLEN) {
size = internal_strlen((const char *)argp) + 1;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
}
}

View File

@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
namespace __sanitizer {
@ -32,4 +33,10 @@ bool PrintsToTtyCached() {
}
return prints_to_tty;
}
bool ColorizeReports() {
const char *flag = common_flags()->color;
return internal_strcmp(flag, "always") == 0 ||
(internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
}
} // namespace __sanitizer
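// Standalone analogue of ColorizeReports using plain libc (illustrative; the
// runtime uses its internal_strcmp and the cached tty check above):
// color=always forces escape codes on, color=auto enables them only when
// output goes to a terminal, anything else (e.g. "never") disables them.
#include <string.h>
#include <unistd.h>
static bool ColorizeDemo(const char *color_flag) {
  return !strcmp(color_flag, "always") ||
         (!strcmp(color_flag, "auto") && isatty(2));  // fd 2 == stderr
}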

View File

@ -232,6 +232,7 @@ POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
}
}
#if !SANITIZER_ANDROID
PRE_SYSCALL(adjtimex)(void *txc_p) {}
POST_SYSCALL(adjtimex)(long res, void *txc_p) {
@ -239,6 +240,7 @@ POST_SYSCALL(adjtimex)(long res, void *txc_p) {
if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
}
}
#endif
PRE_SYSCALL(times)(void *tbuf) {}
@ -384,24 +386,21 @@ PRE_SYSCALL(acct)(const void *name) {
POST_SYSCALL(acct)(long res, const void *name) {}
PRE_SYSCALL(capget)(void *header, void *dataptr) {}
PRE_SYSCALL(capget)(void *header, void *dataptr) {
if (header) PRE_READ(header, __user_cap_header_struct_sz);
}
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
if (res >= 0) {
if (header) POST_WRITE(header, __user_cap_header_struct_sz);
if (res >= 0)
if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
}
}
PRE_SYSCALL(capset)(void *header, const void *data) {
if (header) PRE_READ(header, __user_cap_header_struct_sz);
if (data) PRE_READ(data, __user_cap_data_struct_sz);
}
POST_SYSCALL(capset)(long res, void *header, const void *data) {
if (res >= 0) {
if (header) POST_WRITE(header, __user_cap_header_struct_sz);
}
}
POST_SYSCALL(capset)(long res, void *header, const void *data) {}
PRE_SYSCALL(personality)(long personality) {}
@ -494,6 +493,7 @@ POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
}
}
#if !SANITIZER_ANDROID
PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
@ -501,6 +501,7 @@ POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
if (tx) POST_WRITE(tx, struct_timex_sz);
}
}
#endif
PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
@ -918,6 +919,7 @@ POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
}
}
#if !SANITIZER_ANDROID
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
@ -925,6 +927,7 @@ POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
}
}
#endif // !SANITIZER_ANDROID
PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
if (filename)
@ -1002,8 +1005,8 @@ PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
void *value, long size) {
if (res >= 0) {
if (value) POST_WRITE(value, size);
if (size && res > 0) {
if (value) POST_WRITE(value, res);
}
}
@ -1017,8 +1020,8 @@ PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
void *value, long size) {
if (res >= 0) {
if (value) POST_WRITE(value, size);
if (size && res > 0) {
if (value) POST_WRITE(value, res);
}
}
@ -1029,8 +1032,8 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
long size) {
if (res >= 0) {
if (value) POST_WRITE(value, size);
if (size && res > 0) {
if (value) POST_WRITE(value, res);
}
}
@ -1040,8 +1043,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
if (res >= 0) {
if (list) POST_WRITE(list, size);
if (size && res > 0) {
if (list) POST_WRITE(list, res);
}
}
@ -1051,16 +1054,16 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
}
POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
if (res >= 0) {
if (list) POST_WRITE(list, size);
if (size && res > 0) {
if (list) POST_WRITE(list, res);
}
}
PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
if (res >= 0) {
if (list) POST_WRITE(list, size);
if (size && res > 0) {
if (list) POST_WRITE(list, res);
}
}
@ -2080,6 +2083,7 @@ POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
}
}
#if !SANITIZER_ANDROID
PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
@ -2087,6 +2091,7 @@ POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
}
}
#endif
PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}

View File

@ -9,15 +9,15 @@
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every function F the compiler injects the following code:
// For every interesting basic block the compiler injects the following code:
// if (*Guard) {
// __sanitizer_cov(&F);
// __sanitizer_cov();
// *Guard = 1;
// }
// It's fine to call __sanitizer_cov more than once for a given function.
// It's fine to call __sanitizer_cov more than once for a given block.
//
// Run-time:
// - __sanitizer_cov(pc): record that we've executed a given PC.
// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
// - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple:
@ -35,40 +35,103 @@
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_flags.h"
struct CovData {
BlockingMutex mu;
InternalMmapVector<uptr> v;
};
atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
static uptr cov_data_placeholder[sizeof(CovData) / sizeof(uptr)];
COMPILER_CHECK(sizeof(cov_data_placeholder) >= sizeof(CovData));
static CovData *cov_data = reinterpret_cast<CovData*>(cov_data_placeholder);
// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.
// 128M counters "ought to be enough for anybody" (4M on 32-bit).
// pc_array is allocated with MmapNoReserveOrDie and so it uses only as
// much RAM as it really needs.
static const uptr kPcArraySize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
static uptr *pc_array;
static atomic_uintptr_t pc_array_index;
static bool cov_sandboxed = false;
static int cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
namespace __sanitizer {
// Record the pc in the coverage data. If the function is called more than
// once for a given PC it will be recorded multiple times, which is fine.
static void CovAdd(uptr pc) {
BlockingMutexLock lock(&cov_data->mu);
cov_data->v.push_back(pc);
if (!pc_array) return;
uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
CHECK_LT(idx, kPcArraySize);
pc_array[idx] = pc;
}
void CovInit() {
pc_array = reinterpret_cast<uptr *>(
MmapNoReserveOrDie(sizeof(uptr) * kPcArraySize, "CovInit"));
}
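// Standalone analogue (illustrative, using the same sanitizer atomics as
// CovAdd) of the lock-free append above: a relaxed fetch_add reserves a
// unique slot, which keeps recording both thread-safe and async-signal-safe
// -- no lock may be held when a signal handler runs instrumented code.
static const uptr kDemoSlots = 1 << 16;
static uptr demo_slots[kDemoSlots];
static atomic_uintptr_t demo_next;
static void RecordPcDemo(uptr pc) {
  uptr idx = atomic_fetch_add(&demo_next, 1, memory_order_relaxed);
  if (idx < kDemoSlots) demo_slots[idx] = pc;  // CovAdd CHECKs instead
}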
static inline bool CompareLess(const uptr &a, const uptr &b) {
return a < b;
}
// Block layout for packed file format: header, followed by module name (no
// trailing zero), followed by data blob.
struct CovHeader {
int pid;
unsigned int module_name_length;
unsigned int data_length;
};
static void CovWritePacked(int pid, const char *module, const void *blob,
unsigned int blob_size) {
CHECK_GE(cov_fd, 0);
unsigned module_name_length = internal_strlen(module);
CovHeader header = {pid, module_name_length, blob_size};
if (cov_max_block_size == 0) {
// Writing to a file. Just go ahead.
internal_write(cov_fd, &header, sizeof(header));
internal_write(cov_fd, module, module_name_length);
internal_write(cov_fd, blob, blob_size);
} else {
// Writing to a socket. We want to split the data into appropriately sized
// blocks.
InternalScopedBuffer<char> block(cov_max_block_size);
CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
uptr header_size_with_module = sizeof(header) + module_name_length;
CHECK_LT(header_size_with_module, cov_max_block_size);
unsigned int max_payload_size =
cov_max_block_size - header_size_with_module;
char *block_pos = block.data();
internal_memcpy(block_pos, &header, sizeof(header));
block_pos += sizeof(header);
internal_memcpy(block_pos, module, module_name_length);
block_pos += module_name_length;
char *block_data_begin = block_pos;
char *blob_pos = (char *)blob;
while (blob_size > 0) {
unsigned int payload_size = Min(blob_size, max_payload_size);
blob_size -= payload_size;
internal_memcpy(block_data_begin, blob_pos, payload_size);
blob_pos += payload_size;
((CovHeader *)block.data())->data_length = payload_size;
internal_write(cov_fd, block.data(),
header_size_with_module + payload_size);
}
}
}
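// Reader sketch for the packed stream written above (illustrative, kept to
// this file's own primitives; a real consumer would be a separate tool).
// Records repeat until EOF; over a socket one module's blob may span several
// records, so a consumer concatenates blobs with the same module name.
static bool ReadPackedRecordDemo(uptr fd) {
  CovHeader header;
  if (internal_read(fd, &header, sizeof(header)) != sizeof(header))
    return false;  // clean EOF
  InternalScopedBuffer<char> module(header.module_name_length + 1);
  internal_read(fd, module.data(), header.module_name_length);
  module.data()[header.module_name_length] = 0;  // name has no NUL on disk
  InternalScopedBuffer<char> blob(header.data_length);
  internal_read(fd, blob.data(), header.data_length);
  Printf("pid %d module %s: %zd PCs\n", header.pid, module.data(),
         (uptr)(header.data_length / sizeof(u32)));
  return true;
}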
// Dump the coverage on disk.
void CovDump() {
static void CovDump() {
if (!common_flags()->coverage) return;
#if !SANITIZER_WINDOWS
BlockingMutexLock lock(&cov_data->mu);
InternalMmapVector<uptr> &v = cov_data->v;
InternalSort(&v, v.size(), CompareLess);
InternalMmapVector<u32> offsets(v.size());
const uptr *vb = v.data();
const uptr *ve = vb + v.size();
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
return;
uptr size = atomic_load(&pc_array_index, memory_order_relaxed);
InternalSort(&pc_array, size, CompareLess);
InternalMmapVector<u32> offsets(size);
const uptr *vb = pc_array;
const uptr *ve = vb + size;
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr mb, me, off, prot;
InternalScopedBuffer<char> module(4096);
InternalScopedBuffer<char> path(4096 * 2);
@ -77,8 +140,9 @@ void CovDump() {
i++) {
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
continue;
while (vb < ve && *vb < mb) vb++;
if (vb >= ve) break;
if (mb <= *vb && *vb < me) {
if (*vb < me) {
offsets.clear();
const uptr *old_vb = vb;
CHECK_LE(off, *vb);
@ -88,24 +152,63 @@ void CovDump() {
offsets.push_back(static_cast<u32>(diff));
}
char *module_name = StripModuleName(module.data());
internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
module_name, internal_getpid());
if (cov_sandboxed) {
CovWritePacked(internal_getpid(), module_name, offsets.data(),
offsets.size() * sizeof(u32));
VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
} else {
// One file per module per process.
internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
module_name, internal_getpid());
uptr fd = OpenFile(path.data(), true);
if (internal_iserror(fd)) {
Report(" CovDump: failed to open %s for writing\n", path.data());
} else {
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
internal_close(fd);
VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
vb - old_vb);
}
}
InternalFree(module_name);
uptr fd = OpenFile(path.data(), true);
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
internal_close(fd);
if (common_flags()->verbosity)
Report(" CovDump: %s: %zd PCs written\n", path.data(), vb - old_vb);
}
}
if (cov_fd >= 0)
internal_close(cov_fd);
#endif // !SANITIZER_WINDOWS
}
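// Illustrative decoder for the per-module *.sancov files written above: a
// flat array of u32 PC offsets relative to the module's mapping (the map
// offset was subtracted before writing); symbolization adds them back to the
// module's load address. Uses this file's own libc wrappers; a real tool
// would be a separate program.
static void DumpSancovDemo(const char *path) {
  uptr fd = OpenFile(path, /*write*/ false);
  if (internal_iserror(fd)) return;
  u32 off;
  while (internal_read(fd, &off, sizeof(off)) == sizeof(off))
    Printf("+0x%x\n", off);
  internal_close(fd);
}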
static void OpenPackedFileForWriting() {
CHECK(cov_fd == kInvalidFd);
InternalScopedBuffer<char> path(1024);
internal_snprintf((char *)path.data(), path.size(), "%zd.sancov.packed",
internal_getpid());
uptr fd = OpenFile(path.data(), true);
if (internal_iserror(fd)) {
Report(" Coverage: failed to open %s for writing\n", path.data());
Die();
}
cov_fd = fd;
}
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
if (!args) return;
if (!common_flags()->coverage) return;
cov_sandboxed = args->coverage_sandboxed;
if (!cov_sandboxed) return;
cov_fd = args->coverage_fd;
cov_max_block_size = args->coverage_max_block_size;
if (cov_fd < 0)
// Pre-open the file now. The sandbox won't allow us to do it later.
OpenPackedFileForWriting();
}
} // namespace __sanitizer
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc) {
CovAdd(reinterpret_cast<uptr>(pc));
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
CovAdd(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { CovInit(); }
} // extern "C"

View File

@ -0,0 +1,410 @@
//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// The deadlock detector maintains a directed graph of lock acquisitions.
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed amount of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximal number of locks is reached the entire graph
// is flushed and the new lock epoch is started. The node ids from the old
// epochs can not be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DEADLOCK_DETECTOR_H
#define SANITIZER_DEADLOCK_DETECTOR_H
#include "sanitizer_common.h"
#include "sanitizer_bvgraph.h"
namespace __sanitizer {
// Thread-local state for DeadlockDetector.
// It contains the locks currently held by the owning thread.
template <class BV>
class DeadlockDetectorTLS {
public:
// No CTOR.
void clear() {
bv_.clear();
epoch_ = 0;
n_recursive_locks = 0;
n_all_locks_ = 0;
}
bool empty() const { return bv_.empty(); }
void ensureCurrentEpoch(uptr current_epoch) {
if (epoch_ == current_epoch) return;
bv_.clear();
epoch_ = current_epoch;
}
uptr getEpoch() const { return epoch_; }
// Returns true if this is the first (non-recursive) acquisition of this lock.
bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
// Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
CHECK_EQ(epoch_, current_epoch);
if (!bv_.setBit(lock_id)) {
// The lock is already held by this thread, it must be recursive.
CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
recursive_locks[n_recursive_locks++] = lock_id;
return false;
}
CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
// lock_id < BV::kSize, can cast to a smaller int.
u32 lock_id_short = static_cast<u32>(lock_id);
LockWithContext l = {lock_id_short, stk};
all_locks_with_contexts_[n_all_locks_++] = l;
return true;
}
void removeLock(uptr lock_id) {
if (n_recursive_locks) {
for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
if (recursive_locks[i] == lock_id) {
n_recursive_locks--;
Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
return;
}
}
}
// Printf("remLock: %zx %zx\n", lock_id, epoch_);
CHECK(bv_.clearBit(lock_id));
if (n_all_locks_) {
for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
Swap(all_locks_with_contexts_[i],
all_locks_with_contexts_[n_all_locks_ - 1]);
n_all_locks_--;
break;
}
}
}
}
u32 findLockContext(uptr lock_id) {
for (uptr i = 0; i < n_all_locks_; i++)
if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
return all_locks_with_contexts_[i].stk;
return 0;
}
const BV &getLocks(uptr current_epoch) const {
CHECK_EQ(epoch_, current_epoch);
return bv_;
}
uptr getNumLocks() const { return n_all_locks_; }
uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }
private:
BV bv_;
uptr epoch_;
uptr recursive_locks[64];
uptr n_recursive_locks;
struct LockWithContext {
u32 lock;
u32 stk;
};
LockWithContext all_locks_with_contexts_[64];
uptr n_all_locks_;
};
// DeadlockDetector.
// For deadlock detection to work we need one global DeadlockDetector object
// and one DeadlockDetectorTLS object per thread.
// Most of the methods of this class are not thread-safe (i.e. should be
// guarded by an external lock) unless explicitly noted otherwise.
template <class BV>
class DeadlockDetector {
public:
typedef BV BitVector;
uptr size() const { return g_.size(); }
// No CTOR.
void clear() {
current_epoch_ = 0;
available_nodes_.clear();
recycled_nodes_.clear();
g_.clear();
n_edges_ = 0;
}
// Allocate new deadlock detector node.
// If we are out of available nodes first try to recycle some.
// If there is nothing to recycle, flush the graph and increment the epoch.
// Associate 'data' (opaque user's object) with the new node.
uptr newNode(uptr data) {
if (!available_nodes_.empty())
return getAvailableNode(data);
if (!recycled_nodes_.empty()) {
// Printf("recycling: n_edges_ %zd\n", n_edges_);
for (sptr i = n_edges_ - 1; i >= 0; i--) {
if (recycled_nodes_.getBit(edges_[i].from) ||
recycled_nodes_.getBit(edges_[i].to)) {
Swap(edges_[i], edges_[n_edges_ - 1]);
n_edges_--;
}
}
CHECK(available_nodes_.empty());
// removeEdgesFrom was called in removeNode.
g_.removeEdgesTo(recycled_nodes_);
available_nodes_.setUnion(recycled_nodes_);
recycled_nodes_.clear();
return getAvailableNode(data);
}
// We are out of vacant nodes. Flush and increment the current_epoch_.
current_epoch_ += size();
recycled_nodes_.clear();
available_nodes_.setAll();
g_.clear();
return getAvailableNode(data);
}
// Get data associated with the node created by newNode().
uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }
bool nodeBelongsToCurrentEpoch(uptr node) {
return node && (node / size() * size()) == current_epoch_;
}
void removeNode(uptr node) {
uptr idx = nodeToIndex(node);
CHECK(!available_nodes_.getBit(idx));
CHECK(recycled_nodes_.setBit(idx));
g_.removeEdgesFrom(idx);
}
void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
dtls->ensureCurrentEpoch(current_epoch_);
}
// Returns true if there is a cycle in the graph after this lock event.
// Ideally should be called before the lock is acquired so that we can
// report a deadlock before a real deadlock happens.
bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
}
u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
return dtls->findLockContext(nodeToIndex(node));
}
// Add cur_node to the set of locks held currently by dtls.
void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
dtls->addLock(cur_idx, current_epoch_, stk);
}
// Experimental *racy* fast path function.
// Returns true if all edges from the currently held locks to cur_node exist.
bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
uptr local_epoch = dtls->getEpoch();
// Read from current_epoch_ is racy.
if (cur_node && local_epoch == current_epoch_ &&
local_epoch == nodeToEpoch(cur_node)) {
uptr cur_idx = nodeToIndexUnchecked(cur_node);
for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
if (!g_.hasEdge(dtls->getLock(i), cur_idx))
return false;
}
return true;
}
return false;
}
// Adds edges from currently held locks to cur_node,
// returns the number of added edges, and puts the sources of added edges
// into added_edges[].
// Should be called before onLockAfter.
uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
int unique_tid) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
uptr added_edges[40];
uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
added_edges, ARRAY_SIZE(added_edges));
for (uptr i = 0; i < n_added_edges; i++) {
if (n_edges_ < ARRAY_SIZE(edges_)) {
Edge e = {(u16)added_edges[i], (u16)cur_idx,
dtls->findLockContext(added_edges[i]), stk,
unique_tid};
edges_[n_edges_++] = e;
}
// Printf("Edge%zd: %u %zd=>%zd in T%d\n",
// n_edges_, stk, added_edges[i], cur_idx, unique_tid);
}
return n_added_edges;
}
bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
int *unique_tid) {
uptr from_idx = nodeToIndex(from_node);
uptr to_idx = nodeToIndex(to_node);
for (uptr i = 0; i < n_edges_; i++) {
if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
*stk_from = edges_[i].stk_from;
*stk_to = edges_[i].stk_to;
*unique_tid = edges_[i].unique_tid;
return true;
}
}
return false;
}
// Test-only function. Handles the before/after lock events,
// returns true if there is a cycle.
bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
addEdges(dtls, cur_node, stk, 0);
onLockAfter(dtls, cur_node, stk);
return is_reachable;
}
// Handles the try_lock event, returns false.
// When a try_lock event happens (i.e. a try_lock call succeeds) we need
// to add this lock to the currently held locks, but we should not try to
// change the lock graph or to detect a cycle. We may want to investigate
// whether a more aggressive strategy is possible for try_lock.
bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
dtls->addLock(cur_idx, current_epoch_, stk);
return false;
}
// Returns true iff dtls is empty (no locks are currently held) and we can
// add the node to the currently held locks w/o changing the global state.
// This operation is thread-safe as it only touches the dtls.
bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (!dtls->empty()) return false;
if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
return true;
}
return false;
}
// Finds a path between the lock 'cur_node' (currently not held in dtls)
// and some currently held lock, returns the length of the path
// or 0 on failure.
uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
uptr path_size) {
tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
uptr idx = nodeToIndex(cur_node);
CHECK(!tmp_bv_.getBit(idx));
uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
for (uptr i = 0; i < res; i++)
path[i] = indexToNode(path[i]);
if (res)
CHECK_EQ(path[0], cur_node);
return res;
}
// Handle the unlock event.
// This operation is thread-safe as it only touches the dtls.
void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
if (dtls->getEpoch() == nodeToEpoch(node))
dtls->removeLock(nodeToIndexUnchecked(node));
}
// Tries to handle the lock event w/o writing to global state.
// Returns true on success.
// This operation is thread-safe as it only touches the dtls
// (modulo racy nature of hasAllEdges).
bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (hasAllEdges(dtls, node)) {
dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
return true;
}
return false;
}
bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
}
uptr testOnlyGetEpoch() const { return current_epoch_; }
bool testOnlyHasEdge(uptr l1, uptr l2) {
return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
}
// idx1 and idx2 are raw indices to g_, not lock IDs.
bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
return g_.hasEdge(idx1, idx2);
}
void Print() {
for (uptr from = 0; from < size(); from++)
for (uptr to = 0; to < size(); to++)
if (g_.hasEdge(from, to))
Printf(" %zx => %zx\n", from, to);
}
private:
void check_idx(uptr idx) const { CHECK_LT(idx, size()); }
void check_node(uptr node) const {
CHECK_GE(node, size());
CHECK_EQ(current_epoch_, nodeToEpoch(node));
}
uptr indexToNode(uptr idx) const {
check_idx(idx);
return idx + current_epoch_;
}
uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }
uptr nodeToIndex(uptr node) const {
check_node(node);
return nodeToIndexUnchecked(node);
}
uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
uptr getAvailableNode(uptr data) {
uptr idx = available_nodes_.getAndClearFirstOne();
data_[idx] = data;
return indexToNode(idx);
}
struct Edge {
u16 from;
u16 to;
u32 stk_from;
u32 stk_to;
int unique_tid;
};
uptr current_epoch_;
BV available_nodes_;
BV recycled_nodes_;
BV tmp_bv_;
BVGraph<BV> g_;
uptr data_[BV::kSize];
Edge edges_[BV::kSize * 32];
uptr n_edges_;
};
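// Minimal usage sketch of the test-only onLock API (illustrative, the kind
// of check the unit tests perform; TwoLevelBitVector<> comes in via
// sanitizer_bvgraph.h):
inline void DeadlockDetectorUsageExample() {
  typedef TwoLevelBitVector<> BV;
  static DeadlockDetector<BV> d;  // data_/edges_ are too large for the stack
  DeadlockDetectorTLS<BV> t1, t2;
  d.clear(); t1.clear(); t2.clear();
  uptr a = d.newNode(0), b = d.newNode(0);
  d.onLock(&t1, a);         // T1 takes A ...
  d.onLock(&t1, b);         // ... then B: the edge A->B is recorded
  d.onUnlock(&t1, b); d.onUnlock(&t1, a);
  d.onLock(&t2, b);
  CHECK(d.onLock(&t2, a));  // T2 takes B then A: cycle detected
}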
} // namespace __sanitizer
#endif // SANITIZER_DEADLOCK_DETECTOR_H

View File

@ -0,0 +1,187 @@
//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"
#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
namespace __sanitizer {
typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
struct DDPhysicalThread {
};
struct DDLogicalThread {
u64 ctx;
DeadlockDetectorTLS<DDBV> dd;
DDReport rep;
bool report_pending;
};
struct DD : public DDetector {
SpinMutex mtx;
DeadlockDetector<DDBV> dd;
DDFlags flags;
explicit DD(const DDFlags *flags);
DDPhysicalThread* CreatePhysicalThread();
void DestroyPhysicalThread(DDPhysicalThread *pt);
DDLogicalThread* CreateLogicalThread(u64 ctx);
void DestroyLogicalThread(DDLogicalThread *lt);
void MutexInit(DDCallback *cb, DDMutex *m);
void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
void MutexDestroy(DDCallback *cb, DDMutex *m);
DDReport *GetReport(DDCallback *cb);
void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
void ReportDeadlock(DDCallback *cb, DDMutex *m);
};
DDetector *DDetector::Create(const DDFlags *flags) {
(void)flags;
void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
return new(mem) DD(flags);
}
DD::DD(const DDFlags *flags)
: flags(*flags) {
dd.clear();
}
DDPhysicalThread* DD::CreatePhysicalThread() {
return 0;
}
void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}
DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
lt->ctx = ctx;
lt->dd.clear();
lt->report_pending = false;
return lt;
}
void DD::DestroyLogicalThread(DDLogicalThread *lt) {
lt->~DDLogicalThread();
InternalFree(lt);
}
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
m->id = 0;
m->stk = cb->Unwind();
}
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
if (!dd.nodeBelongsToCurrentEpoch(m->id))
m->id = dd.newNode(reinterpret_cast<uptr>(m));
dd.ensureCurrentEpoch(&lt->dd);
}
void DD::MutexBeforeLock(DDCallback *cb,
DDMutex *m, bool wlock) {
DDLogicalThread *lt = cb->lt;
if (lt->dd.empty()) return; // This will be the first lock held by lt.
if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
SpinMutexLock lk(&mtx);
MutexEnsureID(lt, m);
if (dd.isHeld(&lt->dd, m->id))
return; // FIXME: allow this only for recursive locks.
if (dd.onLockBefore(&lt->dd, m->id)) {
// Actually add this edge now so that we have all the stack traces.
dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
ReportDeadlock(cb, m);
}
}
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
DDLogicalThread *lt = cb->lt;
uptr path[10];
uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
CHECK_EQ(m->id, path[0]);
lt->report_pending = true;
DDReport *rep = &lt->rep;
rep->n = len;
for (uptr i = 0; i < len; i++) {
uptr from = path[i];
uptr to = path[(i + 1) % len];
DDMutex *m0 = (DDMutex*)dd.getData(from);
DDMutex *m1 = (DDMutex*)dd.getData(to);
u32 stk_from = -1U, stk_to = -1U;
int unique_tid = 0;
dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
// Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
// unique_tid);
rep->loop[i].thr_ctx = unique_tid;
rep->loop[i].mtx_ctx0 = m0->ctx;
rep->loop[i].mtx_ctx1 = m1->ctx;
rep->loop[i].stk[0] = stk_to;
rep->loop[i].stk[1] = stk_from;
}
}
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
DDLogicalThread *lt = cb->lt;
u32 stk = 0;
if (flags.second_deadlock_stack)
stk = cb->Unwind();
// Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
if (dd.onFirstLock(&lt->dd, m->id, stk))
return;
if (dd.onLockFast(&lt->dd, m->id, stk))
return;
SpinMutexLock lk(&mtx);
MutexEnsureID(lt, m);
if (wlock) // Only a recursive rlock may be held.
CHECK(!dd.isHeld(&lt->dd, m->id));
if (!trylock)
dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
dd.onLockAfter(&lt->dd, m->id, stk);
}
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
// Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
dd.onUnlock(&cb->lt->dd, m->id);
}
void DD::MutexDestroy(DDCallback *cb,
DDMutex *m) {
if (!m->id) return;
SpinMutexLock lk(&mtx);
if (dd.nodeBelongsToCurrentEpoch(m->id))
dd.removeNode(m->id);
m->id = 0;
}
DDReport *DD::GetReport(DDCallback *cb) {
if (!cb->lt->report_pending)
return 0;
cb->lt->report_pending = false;
return &cb->lt->rep;
}
} // namespace __sanitizer
#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

View File

@ -0,0 +1,427 @@
//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"
#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
namespace __sanitizer {
const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;
struct Id {
u32 id;
u32 seq;
explicit Id(u32 id = 0, u32 seq = 0)
: id(id)
, seq(seq) {
}
};
struct Link {
u32 id;
u32 seq;
u32 tid;
u32 stk0;
u32 stk1;
explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
: id(id)
, seq(seq)
, tid(tid)
, stk0(s0)
, stk1(s1) {
}
};
struct DDPhysicalThread {
DDReport rep;
bool report_pending;
bool visited[kMaxMutex];
Link pending[kMaxMutex];
Link path[kMaxMutex];
};
struct ThreadMutex {
u32 id;
u32 stk;
};
struct DDLogicalThread {
u64 ctx;
ThreadMutex locked[kMaxNesting];
int nlocked;
};
struct Mutex {
StaticSpinMutex mtx;
u32 seq;
int nlink;
Link link[kMaxLink];
};
struct DD : public DDetector {
explicit DD(const DDFlags *flags);
DDPhysicalThread* CreatePhysicalThread();
void DestroyPhysicalThread(DDPhysicalThread *pt);
DDLogicalThread* CreateLogicalThread(u64 ctx);
void DestroyLogicalThread(DDLogicalThread *lt);
void MutexInit(DDCallback *cb, DDMutex *m);
void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
bool trylock);
void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
void MutexDestroy(DDCallback *cb, DDMutex *m);
DDReport *GetReport(DDCallback *cb);
void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
u32 allocateId(DDCallback *cb);
Mutex *getMutex(u32 id);
u32 getMutexId(Mutex *m);
DDFlags flags;
Mutex* mutex[kL1Size];
SpinMutex mtx;
InternalMmapVector<u32> free_id;
int id_gen;
};
DDetector *DDetector::Create(const DDFlags *flags) {
(void)flags;
void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
return new(mem) DD(flags);
}
DD::DD(const DDFlags *flags)
: flags(*flags)
, free_id(1024) {
id_gen = 0;
}
DDPhysicalThread* DD::CreatePhysicalThread() {
DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
"deadlock detector (physical thread)");
return pt;
}
void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
pt->~DDPhysicalThread();
UnmapOrDie(pt, sizeof(DDPhysicalThread));
}
DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
sizeof(DDLogicalThread));
lt->ctx = ctx;
lt->nlocked = 0;
return lt;
}
void DD::DestroyLogicalThread(DDLogicalThread *lt) {
lt->~DDLogicalThread();
InternalFree(lt);
}
void DD::MutexInit(DDCallback *cb, DDMutex *m) {
VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
m->id = kNoId;
m->recursion = 0;
atomic_store(&m->owner, 0, memory_order_relaxed);
}
Mutex *DD::getMutex(u32 id) {
return &mutex[id / kL2Size][id % kL2Size];
}
u32 DD::getMutexId(Mutex *m) {
for (int i = 0; i < kL1Size; i++) {
Mutex *tab = mutex[i];
if (tab == 0)
break;
if (m >= tab && m < tab + kL2Size)
return i * kL2Size + (m - tab);
}
return -1;
}
u32 DD::allocateId(DDCallback *cb) {
u32 id = -1;
SpinMutexLock l(&mtx);
if (free_id.size() > 0) {
id = free_id.back();
free_id.pop_back();
} else {
CHECK_LT(id_gen, kMaxMutex);
if ((id_gen % kL2Size) == 0) {
mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
"deadlock detector (mutex table)");
}
id = id_gen++;
}
CHECK_LE(id, kMaxMutex);
VPrintf(3, "#%llu: DD::allocateId assign id %d\n",
cb->lt->ctx, id);
return id;
}
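// Illustration of the two-level table above: with kL2Size == 1024, id 2051
// lives at mutex[2][3]. Slices are mapped 1024 Mutexes at a time the first
// time an id in the slice is handed out, so the full kMaxMutex (1M) table is
// never allocated up front.
static void MutexTableDemo() {
  u32 id = 2051;
  u32 l1 = id / kL2Size;  // == 2: which 1024-entry slice
  u32 l2 = id % kL2Size;  // == 3: slot within the slice
  (void)l1; (void)l2;     // getMutex(id) returns &mutex[l1][l2]
}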
void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
cb->lt->ctx, m, wlock, cb->lt->nlocked);
DDPhysicalThread *pt = cb->pt;
DDLogicalThread *lt = cb->lt;
uptr owner = atomic_load(&m->owner, memory_order_relaxed);
if (owner == (uptr)cb->lt) {
VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
cb->lt->ctx);
return;
}
CHECK_LE(lt->nlocked, kMaxNesting);
// FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
if (m->id == kNoId)
m->id = allocateId(cb);
ThreadMutex *tm = &lt->locked[lt->nlocked++];
tm->id = m->id;
if (flags.second_deadlock_stack)
tm->stk = cb->Unwind();
if (lt->nlocked == 1) {
VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
cb->lt->ctx);
return;
}
bool added = false;
Mutex *mtx = getMutex(m->id);
for (int i = 0; i < lt->nlocked - 1; i++) {
u32 id1 = lt->locked[i].id;
u32 stk1 = lt->locked[i].stk;
Mutex *mtx1 = getMutex(id1);
SpinMutexLock l(&mtx1->mtx);
if (mtx1->nlink == kMaxLink) {
// FIXME(dvyukov): check stale links
continue;
}
int li = 0;
for (; li < mtx1->nlink; li++) {
Link *link = &mtx1->link[li];
if (link->id == m->id) {
if (link->seq != mtx->seq) {
link->seq = mtx->seq;
link->tid = lt->ctx;
link->stk0 = stk1;
link->stk1 = cb->Unwind();
added = true;
VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
cb->lt->ctx, getMutexId(mtx1), m->id);
}
break;
}
}
if (li == mtx1->nlink) {
// FIXME(dvyukov): check stale links
Link *link = &mtx1->link[mtx1->nlink++];
link->id = m->id;
link->seq = mtx->seq;
link->tid = lt->ctx;
link->stk0 = stk1;
link->stk1 = cb->Unwind();
added = true;
VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
cb->lt->ctx, getMutexId(mtx1), m->id);
}
}
if (!added || mtx->nlink == 0) {
VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
cb->lt->ctx);
return;
}
CycleCheck(pt, lt, m);
}
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
bool trylock) {
VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
DDLogicalThread *lt = cb->lt;
uptr owner = atomic_load(&m->owner, memory_order_relaxed);
if (owner == (uptr)cb->lt) {
VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
CHECK(wlock);
m->recursion++;
return;
}
CHECK_EQ(owner, 0);
if (wlock) {
VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
CHECK_EQ(m->recursion, 0);
m->recursion = 1;
atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
}
if (!trylock)
return;
CHECK_LE(lt->nlocked, kMaxNesting);
if (m->id == kNoId)
m->id = allocateId(cb);
ThreadMutex *tm = &lt->locked[lt->nlocked++];
tm->id = m->id;
if (flags.second_deadlock_stack)
tm->stk = cb->Unwind();
}
void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
cb->lt->ctx, m, wlock, cb->lt->nlocked);
DDLogicalThread *lt = cb->lt;
uptr owner = atomic_load(&m->owner, memory_order_relaxed);
if (owner == (uptr)cb->lt) {
VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
if (--m->recursion > 0)
return;
VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
atomic_store(&m->owner, 0, memory_order_relaxed);
}
CHECK_NE(m->id, kNoId);
int last = lt->nlocked - 1;
for (int i = last; i >= 0; i--) {
if (cb->lt->locked[i].id == m->id) {
lt->locked[i] = lt->locked[last];
lt->nlocked--;
break;
}
}
}
void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
cb->lt->ctx, m);
DDLogicalThread *lt = cb->lt;
if (m->id == kNoId)
return;
// Remove the mutex from lt->locked if there.
int last = lt->nlocked - 1;
for (int i = last; i >= 0; i--) {
if (lt->locked[i].id == m->id) {
lt->locked[i] = lt->locked[last];
lt->nlocked--;
break;
}
}
// Clear and invalidate the mutex descriptor.
{
Mutex *mtx = getMutex(m->id);
SpinMutexLock l(&mtx->mtx);
mtx->seq++;
mtx->nlink = 0;
}
// Return id to cache.
{
SpinMutexLock l(&mtx);
free_id.push_back(m->id);
}
}
void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
DDMutex *m) {
internal_memset(pt->visited, 0, sizeof(pt->visited));
int npath = 0;
int npending = 0;
{
Mutex *mtx = getMutex(m->id);
SpinMutexLock l(&mtx->mtx);
for (int li = 0; li < mtx->nlink; li++)
pt->pending[npending++] = mtx->link[li];
}
while (npending > 0) {
Link link = pt->pending[--npending];
if (link.id == kEndId) {
npath--;
continue;
}
if (pt->visited[link.id])
continue;
Mutex *mtx1 = getMutex(link.id);
SpinMutexLock l(&mtx1->mtx);
if (mtx1->seq != link.seq)
continue;
pt->visited[link.id] = true;
if (mtx1->nlink == 0)
continue;
pt->path[npath++] = link;
pt->pending[npending++] = Link(kEndId);
if (link.id == m->id)
return Report(pt, lt, npath); // Bingo!
for (int li = 0; li < mtx1->nlink; li++) {
Link *link1 = &mtx1->link[li];
// Mutex *mtx2 = getMutex(link->id);
// FIXME(dvyukov): fast seq check
// FIXME(dvyukov): fast nlink != 0 check
// FIXME(dvyukov): fast pending check?
// FIXME(dvyukov): npending can be larger than kMaxMutex
pt->pending[npending++] = *link1;
}
}
}
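// The kEndId sentinel above turns pending[] into an explicit DFS stack: a
// sentinel pushed right after a node's links means "on the way back out, pop
// one element off the current path". Standalone analogue on a small
// fixed-size graph (illustrative, no stale-seq handling):
static bool DfsReachesStartDemo(int start, const bool (&edge)[8][8]) {
  const int kEnd = -1;
  bool visited[8] = {};
  int pending[128], path[8];
  int npending = 0, npath = 0;
  for (int i = 0; i < 8; i++)
    if (edge[start][i]) pending[npending++] = i;
  while (npending > 0) {
    int n = pending[--npending];
    if (n == kEnd) { npath--; continue; }
    if (visited[n]) continue;
    visited[n] = true;
    path[npath++] = n;
    pending[npending++] = kEnd;   // closes n's subtree
    if (n == start) return true;  // walked around to the start: a cycle
    for (int i = 0; i < 8; i++)
      if (edge[n][i]) pending[npending++] = i;
  }
  return false;
}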
void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
DDReport *rep = &pt->rep;
rep->n = npath;
for (int i = 0; i < npath; i++) {
Link *link = &pt->path[i];
Link *link0 = &pt->path[i ? i - 1 : npath - 1];
rep->loop[i].thr_ctx = link->tid;
rep->loop[i].mtx_ctx0 = link0->id;
rep->loop[i].mtx_ctx1 = link->id;
rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
rep->loop[i].stk[1] = link->stk1;
}
pt->report_pending = true;
}
DDReport *DD::GetReport(DDCallback *cb) {
if (!cb->pt->report_pending)
return 0;
cb->pt->report_pending = false;
return &cb->pt->rep;
}
} // namespace __sanitizer
#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

View File

@ -0,0 +1,91 @@
//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// Abstract deadlock detector interface.
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
#endif
#include "sanitizer_internal_defs.h"
#include "sanitizer_atomic.h"
namespace __sanitizer {
// dd - deadlock detector.
// lt - logical (user) thread.
// pt - physical (OS) thread.
struct DDPhysicalThread;
struct DDLogicalThread;
struct DDMutex {
#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
uptr id;
u32 stk; // creation stack
#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
u32 id;
u32 recursion;
atomic_uintptr_t owner;
#else
# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
#endif
u64 ctx;
};
struct DDFlags {
bool second_deadlock_stack;
};
struct DDReport {
enum { kMaxLoopSize = 8 };
int n; // number of entries in loop
struct {
u64 thr_ctx; // user thread context
u64 mtx_ctx0; // user mutex context, start of the edge
u64 mtx_ctx1; // user mutex context, end of the edge
u32 stk[2]; // stack ids for the edge
} loop[kMaxLoopSize];
};
struct DDCallback {
DDPhysicalThread *pt;
DDLogicalThread *lt;
virtual u32 Unwind() { return 0; }
virtual int UniqueTid() { return 0; }
};
struct DDetector {
static DDetector *Create(const DDFlags *flags);
virtual DDPhysicalThread* CreatePhysicalThread() { return 0; }
virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return 0; }
virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
bool trylock) {}
virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
virtual DDReport *GetReport(DDCallback *cb) { return 0; }
};
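// Minimal driving sketch for this interface (illustrative; the in-tree user
// is TSan, whose callback also implements Unwind() and UniqueTid()):
inline void DDetectorExample(DDetector *dd, u64 thread_ctx, DDMutex *m) {
  DDCallback cb;
  cb.pt = dd->CreatePhysicalThread();
  cb.lt = dd->CreateLogicalThread(thread_ctx);
  dd->MutexInit(&cb, m);
  dd->MutexBeforeLock(&cb, m, /*wlock=*/true);  // may record a pending report
  dd->MutexAfterLock(&cb, m, /*wlock=*/true, /*trylock=*/false);
  if (DDReport *rep = dd->GetReport(&cb)) {
    (void)rep;  // walk rep->loop[0 .. rep->n) and report each cycle edge
  }
  dd->MutexBeforeUnlock(&cb, m, /*wlock=*/true);
  dd->DestroyLogicalThread(cb.lt);
  dd->DestroyPhysicalThread(cb.pt);
}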
} // namespace __sanitizer
#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H

View File

@ -13,12 +13,24 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
namespace __sanitizer {
CommonFlags common_flags_dont_use;
struct FlagDescription {
const char *name;
const char *description;
FlagDescription *next;
};
IntrusiveList<FlagDescription> flag_descriptions;
void SetCommonFlagsDefaults(CommonFlags *f) {
f->symbolize = true;
f->external_symbolizer_path = 0;
f->allow_addr2line = false;
f->strip_path_prefix = "";
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
@ -26,26 +38,96 @@ void SetCommonFlagsDefaults(CommonFlags *f) {
f->malloc_context_size = 1;
f->log_path = "stderr";
f->verbosity = 0;
f->detect_leaks = false;
f->detect_leaks = true;
f->leak_check_at_exit = true;
f->allocator_may_return_null = false;
f->print_summary = true;
f->check_printf = true;
// TODO(glider): tools may want to set different defaults for handle_segv.
f->handle_segv = SANITIZER_NEEDS_SEGV;
f->allow_user_segv_handler = false;
f->use_sigaltstack = true;
f->detect_deadlocks = false;
f->clear_shadow_mmap_threshold = 64 * 1024;
f->color = "auto";
f->legacy_pthread_cond = false;
f->intercept_tls_get_addr = false;
f->coverage = false;
f->full_address_space = false;
}
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
ParseFlag(str, &f->log_path, "log_path");
ParseFlag(str, &f->verbosity, "verbosity");
ParseFlag(str, &f->detect_leaks, "detect_leaks");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
ParseFlag(str, &f->print_summary, "print_summary");
ParseFlag(str, &f->symbolize, "symbolize",
"If set, use the online symbolizer from common sanitizer runtime to turn "
"virtual addresses to file/line locations.");
ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
"Path to external symbolizer. If empty, the tool will search $PATH for "
"the symbolizer.");
ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
"If set, allows online symbolizer to run addr2line binary to symbolize "
"stack traces (addr2line will only be used if llvm-symbolizer binary is "
"unavailable.");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
"Strips this prefix from file paths in error reports.");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
"Intercept and handle ioctl requests.");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
"Max number of stack frames kept for each allocation/deallocation.");
ParseFlag(str, &f->log_path, "log_path",
"Write logs to \"log_path.pid\". The special values are \"stdout\" and "
"\"stderr\". The default is \"stderr\".");
ParseFlag(str, &f->verbosity, "verbosity",
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
ParseFlag(str, &f->detect_leaks, "detect_leaks",
"Enable memory leak detection.");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
"Invoke leak checking in an atexit handler. Has no effect if "
"detect_leaks=false, or if __lsan_do_leak_check() is called before the "
"handler has a chance to run.");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
"If false, the allocator will crash instead of returning 0 on "
"out-of-memory.");
ParseFlag(str, &f->print_summary, "print_summary",
"If false, disable printing error summaries in addition to error "
"reports.");
ParseFlag(str, &f->check_printf, "check_printf",
"Check printf arguments.");
ParseFlag(str, &f->handle_segv, "handle_segv",
"If set, registers the tool's custom SEGV handler (both SIGBUS and "
"SIGSEGV on OSX).");
ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
"If set, allows user to register a SEGV handler even if the tool "
"registers one.");
ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
"If set, uses alternate stack for signal handling.");
ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
"If set, deadlock detection is enabled.");
ParseFlag(str, &f->clear_shadow_mmap_threshold,
"clear_shadow_mmap_threshold",
"Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
"memset(). This is the threshold size in bytes.");
ParseFlag(str, &f->color, "color",
"Colorize reports: (always|never|auto).");
ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
"Enables support for dynamic libraries linked with libpthread 2.2.5.");
ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
"Intercept __tls_get_addr.");
ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
"Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
"not a user-facing flag, used mosly for testing the tools");
ParseFlag(str, &f->coverage, "coverage",
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).");
ParseFlag(str, &f->full_address_space, "full_address_space",
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
@ -100,9 +182,40 @@ static bool StartsWith(const char *flag, int flag_length, const char *value) {
(0 == internal_strncmp(flag, value, value_length));
}
void ParseFlag(const char *env, bool *flag, const char *name) {
static LowLevelAllocator allocator_for_flags;
// The linear scan is suboptimal, but the number of flags is relatively small.
bool FlagInDescriptionList(const char *name) {
IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
while (it.hasNext()) {
if (!internal_strcmp(it.next()->name, name)) return true;
}
return false;
}
void AddFlagDescription(const char *name, const char *description) {
if (FlagInDescriptionList(name)) return;
FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
new_description->name = name;
new_description->description = description;
flag_descriptions.push_back(new_description);
}
// TODO(glider): put the descriptions inside CommonFlags.
void PrintFlagDescriptions() {
IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
Printf("Available flags for %s:\n", SanitizerToolName);
while (it.hasNext()) {
FlagDescription *descr = it.next();
Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
}
}
void ParseFlag(const char *env, bool *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
if (StartsWith(value, value_length, "0") ||
@ -115,19 +228,31 @@ void ParseFlag(const char *env, bool *flag, const char *name) {
*flag = true;
}
void ParseFlag(const char *env, int *flag, const char *name) {
void ParseFlag(const char *env, int *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<int>(internal_atoll(value));
}
static LowLevelAllocator allocator_for_flags;
void ParseFlag(const char *env, const char **flag, const char *name) {
void ParseFlag(const char *env, uptr *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<uptr>(internal_atoll(value));
}
void ParseFlag(const char *env, const char **flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
// Copy the flag value. Don't use locks here, as flags are parsed at
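
For context (not from the commit): a typical consumer of these helpers is a
tool's init path, roughly along the lines below; the options string usually
comes from an environment variable such as ASAN_OPTIONS.

// Sketch of a startup sequence (assumes GetEnv() from sanitizer_common.h;
// exact call sites vary per tool).
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
// e.g. ASAN_OPTIONS="verbosity=1:detect_leaks=1:color=never"
ParseCommonFlagsFromString(cf, GetEnv("ASAN_OPTIONS"));
if (cf->help)
  PrintFlagDescriptions();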

View File

@ -16,51 +16,53 @@
namespace __sanitizer {
void ParseFlag(const char *env, bool *flag, const char *name);
void ParseFlag(const char *env, int *flag, const char *name);
void ParseFlag(const char *env, const char **flag, const char *name);
void ParseFlag(const char *env, bool *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, int *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, uptr *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, const char **flag,
const char *name, const char *descr);
struct CommonFlags {
// If set, use the online symbolizer from common sanitizer runtime.
bool symbolize;
// Path to external symbolizer. If it is NULL, symbolizer will be looked for
// in PATH. If it is empty, external symbolizer will not be started.
const char *external_symbolizer_path;
// Strips this prefix from file paths in error reports.
bool allow_addr2line;
const char *strip_path_prefix;
// Use fast (frame-pointer-based) unwinder on fatal errors (if available).
bool fast_unwind_on_fatal;
// Use fast (frame-pointer-based) unwinder on malloc/free (if available).
bool fast_unwind_on_malloc;
// Intercept and handle ioctl requests.
bool handle_ioctl;
// Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// Write logs to "log_path.pid".
// The special values are "stdout" and "stderr".
// The default is "stderr".
const char *log_path;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// Enable memory leak detection.
bool detect_leaks;
// Invoke leak checking in an atexit handler. Has no effect if
// detect_leaks=false, or if __lsan_do_leak_check() is called before the
// handler has a chance to run.
bool leak_check_at_exit;
// If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null;
// If false, disable printing error summaries in addition to error reports.
bool print_summary;
bool check_printf;
bool handle_segv;
bool allow_user_segv_handler;
bool use_sigaltstack;
bool detect_deadlocks;
uptr clear_shadow_mmap_threshold;
const char *color;
bool legacy_pthread_cond;
bool intercept_tls_get_addr;
bool help;
uptr mmap_limit_mb;
bool coverage;
bool full_address_space;
};
inline CommonFlags *common_flags() {
static CommonFlags f;
return &f;
extern CommonFlags common_flags_dont_use;
return &common_flags_dont_use;
}
void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
void PrintFlagDescriptions();
} // namespace __sanitizer

View File

@ -0,0 +1,23 @@
//===-- sanitizer_interception.h --------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common macro definitions for interceptors.
// Always use this header instead of interception/interception.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERCEPTION_H
#define SANITIZER_INTERCEPTION_H
#include "interception/interception.h"
#include "sanitizer_common.h"
#if SANITIZER_LINUX && !defined(SANITIZER_GO)
#undef REAL
#define REAL(x) IndirectExternCall(__interception::PTR_TO_REAL(x))
#endif
#endif // SANITIZER_INTERCEPTION_H
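
A hedged example of what this wrapper buys (the INTERCEPTOR and REAL macros
come from interception/interception.h, which this header includes):

// With the #undef/#define above, REAL(strdup) resolves through
// IndirectExternCall on Linux instead of a plain function-pointer call.
#include "sanitizer_interception.h"

INTERCEPTOR(char *, strdup, const char *s) {
  // ... tool-specific bookkeeping would go here ...
  return REAL(strdup)(s);
}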

View File

@ -32,10 +32,9 @@
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
// If set, the tool will install its own SEGV signal handler.
#ifndef SANITIZER_NEEDS_SEGV
# define SANITIZER_NEEDS_SEGV 1
#endif
// GCC does not understand __has_feature
@ -57,7 +56,7 @@ typedef unsigned long uptr; // NOLINT
typedef signed long sptr; // NOLINT
#endif // defined(_WIN64)
#if defined(__x86_64__)
// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
// 64-bit pointer to unwind stack frame.
typedef unsigned long long uhwptr; // NOLINT
#else
@ -97,11 +96,15 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_sandbox_on_notify(void *reserved);
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
@ -110,12 +113,16 @@ extern "C" {
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(void *pc);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov();
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
} // extern "C"
@ -141,8 +148,6 @@ using namespace __sanitizer; // NOLINT
# define NOTHROW
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
# define UNUSED
# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
@ -157,8 +162,6 @@ using namespace __sanitizer; // NOLINT
# define NOTHROW throw()
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetchnt0 on x86
# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
@ -167,6 +170,14 @@ using namespace __sanitizer; // NOLINT
# endif
#endif // _MSC_VER
#if !defined(_MSC_VER) || defined(__clang__)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
#else
# define UNUSED
# define USED
#endif
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@ -197,7 +208,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
// Check macro
#define RAW_CHECK_MSG(expr, msg) do { \
if (!(expr)) { \
if (UNLIKELY(!(expr))) { \
RawWrite(msg); \
Die(); \
} \
@ -209,7 +220,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
do { \
__sanitizer::u64 v1 = (u64)(c1); \
__sanitizer::u64 v2 = (u64)(c2); \
if (!(v1 op v2)) \
if (UNLIKELY(!(v1 op v2))) \
__sanitizer::CheckFailed(__FILE__, __LINE__, \
"(" #c1 ") " #op " (" #c2 ")", v1, v2); \
} while (false) \

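The only behavioral change in these macros is the UNLIKELY annotation, which
lets compilers lay the failure path out as cold code; call sites such as the
following (illustrative, not from the diff) are unaffected:

// Both still die via RawWrite()/CheckFailed() when the condition fails.
RAW_CHECK_MSG(ptr != 0, "unexpected null pointer\n");
CHECK_EQ(internal_strcmp(a, b), 0);
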
View File

@ -16,7 +16,7 @@ namespace __sanitizer {
// Make the compiler think that something is going on there.
static inline void break_optimization(void *arg) {
#if SANITIZER_WINDOWS
#if _MSC_VER
// FIXME: make sure this is actually enough.
__asm;
#else

View File

@ -89,12 +89,16 @@ uptr internal_waitpid(int pid, int *status, int options);
uptr internal_getpid();
uptr internal_getppid();
int internal_fork();
// Threading
uptr internal_sched_yield();
// Error handling
bool internal_iserror(uptr retval, int *rverrno = 0);
int internal_sigaction(int signum, const void *act, void *oldact);
} // namespace __sanitizer
#endif // SANITIZER_LIBC_H

View File

@ -74,9 +74,10 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
loaded = true;
if (lib->loaded)
continue;
if (common_flags()->verbosity)
Report("Matched called_from_lib suppression '%s' against library"
" '%s'\n", lib->templ, module.data());
VReport(1,
"Matched called_from_lib suppression '%s' against library"
" '%s'\n",
lib->templ, module.data());
lib->loaded = true;
lib->name = internal_strdup(module.data());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
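
The VReport() used here is new in this merge; a sketch of its presumed shape
(the real definition lives in sanitizer_common.h and may differ in detail):

// Assumed form of the helper that replaces explicit verbosity checks.
#define VReport(level, ...)                               \
  do {                                                    \
    if ((uptr)common_flags()->verbosity >= (level))       \
      Report(__VA_ARGS__);                                \
  } while (0)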

View File

@ -11,9 +11,10 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
@ -23,7 +24,10 @@
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#if !SANITIZER_FREEBSD
#include <asm/param.h>
#endif
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
@ -42,10 +46,25 @@
#include <unistd.h>
#include <unwind.h>
#if SANITIZER_FREEBSD
#include <machine/atomic.h>
extern "C" {
// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
// FreeBSD 9.2 and 10.0.
#include <sys/umtx.h>
}
#endif // SANITIZER_FREEBSD
#if !SANITIZER_ANDROID
#include <sys/signal.h>
#endif
#if SANITIZER_ANDROID
#include <android/log.h>
#include <sys/system_properties.h>
#endif
#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
@ -55,11 +74,12 @@ struct kernel_timeval {
// <linux/futex.h> is broken on some linux distributions.
const int FUTEX_WAIT = 0;
const int FUTEX_WAKE = 1;
#endif // SANITIZER_LINUX
// Are we using 32-bit or 64-bit syscalls?
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
#if defined(__x86_64__) || SANITIZER_WORDSIZE == 64
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_WORDSIZE == 64)
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@ -67,7 +87,7 @@ const int FUTEX_WAKE = 1;
namespace __sanitizer {
#ifdef __x86_64__
#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
#else
#include "sanitizer_syscall_generic.inc"
@ -76,28 +96,38 @@ namespace __sanitizer {
// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(__NR_mmap, (uptr)addr, length, prot, flags, fd,
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
offset);
#endif
}
uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(__NR_munmap, (uptr)addr, length);
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
uptr internal_close(fd_t fd) {
return internal_syscall(__NR_close, fd);
return internal_syscall(SYSCALL(close), fd);
}
uptr internal_open(const char *filename, int flags) {
return internal_syscall(__NR_open, (uptr)filename, flags);
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
#else
return internal_syscall(SYSCALL(open), (uptr)filename, flags);
#endif
}
uptr internal_open(const char *filename, int flags, u32 mode) {
return internal_syscall(__NR_open, (uptr)filename, flags, mode);
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
mode);
#else
return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
#endif
}
uptr OpenFile(const char *filename, bool write) {
@ -107,17 +137,19 @@ uptr OpenFile(const char *filename, bool write) {
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, (uptr)buf, count));
HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf,
count));
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, (uptr)buf, count));
HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf,
count));
return res;
}
#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS
#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@ -138,33 +170,43 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
#endif
uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(__NR_stat, (uptr)path, (uptr)buf);
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(stat), path, buf);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, 0);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
int res = internal_syscall(__NR_stat64, path, &buf64);
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_lstat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(__NR_lstat, (uptr)path, (uptr)buf);
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(lstat), path, buf);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
#else
struct stat64 buf64;
int res = internal_syscall(__NR_lstat64, path, &buf64);
int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_fstat(fd_t fd, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(__NR_fstat, fd, (uptr)buf);
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
#else
struct stat64 buf64;
int res = internal_syscall(__NR_fstat64, fd, &buf64);
int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
@ -178,48 +220,80 @@ uptr internal_filesize(fd_t fd) {
}
uptr internal_dup2(int oldfd, int newfd) {
return internal_syscall(__NR_dup2, oldfd, newfd);
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
#else
return internal_syscall(SYSCALL(dup2), oldfd, newfd);
#endif
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
return internal_syscall(__NR_readlink, (uptr)path, (uptr)buf, bufsize);
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD,
(uptr)path, (uptr)buf, bufsize);
#else
return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
#endif
}
uptr internal_unlink(const char *path) {
return internal_syscall(__NR_unlink, (uptr)path);
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
#else
return internal_syscall(SYSCALL(unlink), (uptr)path);
#endif
}
uptr internal_sched_yield() {
return internal_syscall(__NR_sched_yield);
return internal_syscall(SYSCALL(sched_yield));
}
void internal__exit(int exitcode) {
internal_syscall(__NR_exit_group, exitcode);
#if SANITIZER_FREEBSD
internal_syscall(SYSCALL(exit), exitcode);
#else
internal_syscall(SYSCALL(exit_group), exitcode);
#endif
Die(); // Unreachable.
}
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]) {
return internal_syscall(__NR_execve, (uptr)filename, (uptr)argv, (uptr)envp);
return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
(uptr)envp);
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
struct stat st;
if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
return false;
#else
struct stat st;
if (internal_stat(filename, &st))
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
#endif
}
uptr GetTid() {
return internal_syscall(__NR_gettid);
#if SANITIZER_FREEBSD
return (uptr)pthread_self();
#else
return internal_syscall(SYSCALL(gettid));
#endif
}
u64 NanoTime() {
#if SANITIZER_FREEBSD
timeval tv;
#else
kernel_timeval tv;
#endif
internal_memset(&tv, 0, sizeof(tv));
internal_syscall(__NR_gettimeofday, (uptr)&tv, 0);
internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
@ -303,7 +377,18 @@ void ReExec() {
Die();
}
void PrepareForSandboxing() {
// Stub implementation of GetThreadStackAndTls for Go.
#if SANITIZER_GO
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
*tls_size = 0;
}
#endif // SANITIZER_GO
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
// to read the file mappings from /proc/self/maps. Luckily, neither will the
// process be able to load additional libraries, so it's fine to use the
@ -313,216 +398,10 @@ void PrepareForSandboxing() {
#if !SANITIZER_GO
if (Symbolizer *sym = Symbolizer::GetOrNull())
sym->PrepareForSandboxing();
CovPrepareForSandboxing(args);
#endif
}
// ----------------- sanitizer_procmaps.h
// Linker initialized.
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
proc_self_maps_.len =
ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
&proc_self_maps_.mmaped_size, 1 << 26);
if (cache_enabled) {
if (proc_self_maps_.mmaped_size == 0) {
LoadFromCache();
CHECK_GT(proc_self_maps_.len, 0);
}
} else {
CHECK_GT(proc_self_maps_.mmaped_size, 0);
}
Reset();
// FIXME: in the future we may want to cache the mappings on demand only.
if (cache_enabled)
CacheMemoryMappings();
}
MemoryMappingLayout::~MemoryMappingLayout() {
// Only unmap the buffer if it is different from the cached one. Otherwise
// it will be unmapped when the cache is refreshed.
if (proc_self_maps_.data != cached_proc_self_maps_.data) {
UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
}
}
void MemoryMappingLayout::Reset() {
current_ = proc_self_maps_.data;
}
// static
void MemoryMappingLayout::CacheMemoryMappings() {
SpinMutexLock l(&cache_lock_);
// Don't invalidate the cache if the mappings are unavailable.
ProcSelfMapsBuff old_proc_self_maps;
old_proc_self_maps = cached_proc_self_maps_;
cached_proc_self_maps_.len =
ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_.data,
&cached_proc_self_maps_.mmaped_size, 1 << 26);
if (cached_proc_self_maps_.mmaped_size == 0) {
cached_proc_self_maps_ = old_proc_self_maps;
} else {
if (old_proc_self_maps.mmaped_size) {
UnmapOrDie(old_proc_self_maps.data,
old_proc_self_maps.mmaped_size);
}
}
}
void MemoryMappingLayout::LoadFromCache() {
SpinMutexLock l(&cache_lock_);
if (cached_proc_self_maps_.data) {
proc_self_maps_ = cached_proc_self_maps_;
}
}
// Parse a hex value in str and update str.
static uptr ParseHex(char **str) {
uptr x = 0;
char *s;
for (s = *str; ; s++) {
char c = *s;
uptr v = 0;
if (c >= '0' && c <= '9')
v = c - '0';
else if (c >= 'a' && c <= 'f')
v = c - 'a' + 10;
else if (c >= 'A' && c <= 'F')
v = c - 'A' + 10;
else
break;
x = x * 16 + v;
}
*str = s;
return x;
}
static bool IsOneOf(char c, char c1, char c2) {
return c == c1 || c == c2;
}
static bool IsDecimal(char c) {
return c >= '0' && c <= '9';
}
static bool IsHex(char c) {
return (c >= '0' && c <= '9')
|| (c >= 'a' && c <= 'f');
}
static uptr ReadHex(const char *p) {
uptr v = 0;
for (; IsHex(p[0]); p++) {
if (p[0] >= '0' && p[0] <= '9')
v = v * 16 + p[0] - '0';
else
v = v * 16 + p[0] - 'a' + 10;
}
return v;
}
static uptr ReadDecimal(const char *p) {
uptr v = 0;
for (; IsDecimal(p[0]); p++)
v = v * 10 + p[0] - '0';
return v;
}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
uptr *protection) {
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
if (!start) start = &dummy;
if (!end) end = &dummy;
if (!offset) offset = &dummy;
char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
if (next_line == 0)
next_line = last;
// Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
*start = ParseHex(&current_);
CHECK_EQ(*current_++, '-');
*end = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
uptr local_protection = 0;
CHECK(IsOneOf(*current_, '-', 'r'));
if (*current_++ == 'r')
local_protection |= kProtectionRead;
CHECK(IsOneOf(*current_, '-', 'w'));
if (*current_++ == 'w')
local_protection |= kProtectionWrite;
CHECK(IsOneOf(*current_, '-', 'x'));
if (*current_++ == 'x')
local_protection |= kProtectionExecute;
CHECK(IsOneOf(*current_, 's', 'p'));
if (*current_++ == 's')
local_protection |= kProtectionShared;
if (protection) {
*protection = local_protection;
}
CHECK_EQ(*current_++, ' ');
*offset = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
ParseHex(&current_);
CHECK_EQ(*current_++, ':');
ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
while (IsDecimal(*current_))
current_++;
// Qemu may lack the trailing space.
// http://code.google.com/p/address-sanitizer/issues/detail?id=160
// CHECK_EQ(*current_++, ' ');
// Skip spaces.
while (current_ < next_line && *current_ == ' ')
current_++;
// Fill in the filename.
uptr i = 0;
while (current_ < next_line) {
if (filename && i < filename_size - 1)
filename[i++] = *current_;
current_++;
}
if (filename && i < filename_size)
filename[i] = 0;
current_ = next_line + 1;
return true;
}
// Gets the object name and the offset by walking MemoryMappingLayout.
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[],
uptr filename_size,
uptr *protection) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
protection);
}
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
char *smaps = 0;
uptr smaps_cap = 0;
uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
&smaps, &smaps_cap, 64<<20);
uptr start = 0;
bool file = false;
const char *pos = smaps;
while (pos < smaps + smaps_len) {
if (IsHex(pos[0])) {
start = ReadHex(pos);
for (; *pos != '/' && *pos > '\n'; pos++) {}
file = *pos == '/';
} else if (internal_strncmp(pos, "Rss:", 4) == 0) {
for (; *pos < '0' || *pos > '9'; pos++) {}
uptr rss = ReadDecimal(pos) * 1024;
cb(start, rss, file, stats, stats_size);
}
while (*pos++ != '\n') {}
}
UnmapOrDie(smaps, smaps_cap);
}
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
@ -541,16 +420,26 @@ void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
internal_syscall(__NR_futex, (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
#if SANITIZER_FREEBSD
_umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
#else
internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
#endif
}
}
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping)
internal_syscall(__NR_futex, (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
if (v == MtxSleeping) {
#if SANITIZER_FREEBSD
_umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
#else
internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
#endif
}
}
void BlockingMutex::CheckLocked() {
@ -563,71 +452,133 @@ void BlockingMutex::CheckLocked() {
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
struct linux_dirent {
#if SANITIZER_X32
u64 d_ino;
u64 d_off;
#else
unsigned long d_ino;
unsigned long d_off;
#endif
unsigned short d_reclen;
char d_name[256];
};
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
return internal_syscall(__NR_ptrace, request, pid, (uptr)addr, (uptr)data);
return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
(uptr)data);
}
uptr internal_waitpid(int pid, int *status, int options) {
return internal_syscall(__NR_wait4, pid, (uptr)status, options,
return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
0 /* rusage */);
}
uptr internal_getpid() {
return internal_syscall(__NR_getpid);
return internal_syscall(SYSCALL(getpid));
}
uptr internal_getppid() {
return internal_syscall(__NR_getppid);
return internal_syscall(SYSCALL(getppid));
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
return internal_syscall(__NR_getdents, fd, (uptr)dirp, count);
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
#else
return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
#endif
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
return internal_syscall(__NR_lseek, fd, offset, whence);
return internal_syscall(SYSCALL(lseek), fd, offset, whence);
}
#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
#endif
uptr internal_sigaltstack(const struct sigaltstack *ss,
struct sigaltstack *oss) {
return internal_syscall(__NR_sigaltstack, (uptr)ss, (uptr)oss);
return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}
uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
__sanitizer_kernel_sigaction_t *oldact) {
return internal_syscall(__NR_rt_sigaction, signum, act, oldact,
sizeof(__sanitizer_kernel_sigset_t));
int internal_fork() {
return internal_syscall(SYSCALL(fork));
}
uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
__sanitizer_kernel_sigset_t *oldset) {
return internal_syscall(__NR_rt_sigprocmask, (uptr)how, &set->sig[0],
&oldset->sig[0], sizeof(__sanitizer_kernel_sigset_t));
#if SANITIZER_LINUX
// Doesn't set sa_restorer, use with caution (see below).
int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
__sanitizer_kernel_sigaction_t k_act, k_oldact;
internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
const __sanitizer_sigaction *u_act = (__sanitizer_sigaction *)act;
__sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
if (u_act) {
k_act.handler = u_act->handler;
k_act.sigaction = u_act->sigaction;
internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
sizeof(__sanitizer_kernel_sigset_t));
k_act.sa_flags = u_act->sa_flags;
// FIXME: most often sa_restorer is unset; however, the kernel requires it
// to point to a valid signal restorer that calls the rt_sigreturn syscall.
// If the sa_restorer passed to the kernel is NULL, the program may crash upon
// signal delivery or fail to unwind the stack in the signal handler.
// The libc implementation of sigaction() passes its own restorer to
// rt_sigaction, so we need to do the same (we'll need to reimplement the
// restorers; for x86_64 the restorer address can be obtained from
// oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact)).
k_act.sa_restorer = u_act->sa_restorer;
}
uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
(uptr)(u_act ? &k_act : NULL),
(uptr)(u_oldact ? &k_oldact : NULL),
(uptr)sizeof(__sanitizer_kernel_sigset_t));
if ((result == 0) && u_oldact) {
u_oldact->handler = k_oldact.handler;
u_oldact->sigaction = k_oldact.sigaction;
internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
sizeof(__sanitizer_kernel_sigset_t));
u_oldact->sa_flags = k_oldact.sa_flags;
u_oldact->sa_restorer = k_oldact.sa_restorer;
}
return result;
}
#endif // SANITIZER_LINUX
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
#else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
__sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
(uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
sizeof(__sanitizer_kernel_sigset_t));
#endif
}
void internal_sigfillset(__sanitizer_kernel_sigset_t *set) {
void internal_sigfillset(__sanitizer_sigset_t *set) {
internal_memset(set, 0xff, sizeof(*set));
}
void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum) {
#if SANITIZER_LINUX
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
CHECK_LT(signum, sizeof(*set) * 8);
const uptr idx = signum / (sizeof(set->sig[0]) * 8);
const uptr bit = signum % (sizeof(set->sig[0]) * 8);
set->sig[idx] &= ~(1 << bit);
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
k_set->sig[idx] &= ~(1 << bit);
}
#endif // SANITIZER_LINUX
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
@ -698,7 +649,7 @@ bool ThreadLister::GetDirectoryEntries() {
}
uptr GetPageSize() {
#if defined(__x86_64__) || defined(__i386__)
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
@ -753,8 +704,10 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
#endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@ -788,7 +741,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
#endif
#if defined(__x86_64__)
#if defined(__x86_64__) && SANITIZER_LINUX
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@ -807,7 +760,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
/* %rax = syscall(%rax = __NR_clone,
/* %rax = syscall(%rax = SYSCALL(clone),
* %rdi = flags,
* %rsi = child_stack,
* %rdx = parent_tidptr,
@ -841,7 +794,7 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
/* Return to parent. */
"1:\n"
: "=a" (res)
: "a"(__NR_clone), "i"(__NR_exit),
: "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
"S"(child_stack),
"D"(flags),
"d"(parent_tidptr),
@ -850,7 +803,38 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "rsp", "memory", "r11", "rcx");
return res;
}
#endif // defined(__x86_64__)
#endif // defined(__x86_64__) && SANITIZER_LINUX
#if SANITIZER_ANDROID
// This thing is not, strictly speaking, async signal safe, but it does not seem
// to cause any issues. The alternative is writing to log devices directly, but
// their location and message format might change in the future, so we'd really
// like to avoid that.
void AndroidLogWrite(const char *buffer) {
char *copy = internal_strdup(buffer);
char *p = copy;
char *q;
// __android_log_write has an implicit message length limit.
// Print one line at a time.
do {
q = internal_strchr(p, '\n');
if (q) *q = '\0';
__android_log_write(ANDROID_LOG_INFO, NULL, p);
if (q) p = q + 1;
} while (q);
InternalFree(copy);
}
void GetExtraActivationFlags(char *buf, uptr size) {
CHECK(size > PROP_VALUE_MAX);
__system_property_get("asan.options", buf);
}
#endif
bool IsDeadlySignal(int signum) {
return (signum == SIGSEGV) && common_flags()->handle_segv;
}
} // namespace __sanitizer
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
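
The SYSCALL() spelling used throughout this file abstracts over the per-OS
syscall-number prefixes; its definition (near the top of the file, outside
this diff's context) is presumably along these lines:

// Assumed definition of the SYSCALL() macro used above.
#if SANITIZER_FREEBSD
# define SYSCALL(name) SYS_ ## name   // FreeBSD: SYS_mmap, SYS_close, ...
#else
# define SYSCALL(name) __NR_ ## name  // Linux: __NR_mmap, __NR_close, ...
#endif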

View File

@ -12,7 +12,7 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_posix.h"
@ -27,20 +27,25 @@ struct linux_dirent;
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
uptr internal_sigaction(int signum, const __sanitizer_kernel_sigaction_t *act,
__sanitizer_kernel_sigaction_t *oldact);
uptr internal_sigprocmask(int how, __sanitizer_kernel_sigset_t *set,
__sanitizer_kernel_sigset_t *oldset);
void internal_sigfillset(__sanitizer_kernel_sigset_t *set);
void internal_sigdelset(__sanitizer_kernel_sigset_t *set, int signum);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
void internal_sigfillset(__sanitizer_sigset_t *set);
#ifdef __x86_64__
// Linux-only syscalls.
#if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
#endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@ -64,8 +69,6 @@ class ThreadLister {
int bytes_read_;
};
void AdjustStackSizeLinux(void *attr);
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
@ -84,5 +87,5 @@ void CacheBinaryName();
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_LINUX_H

View File

@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
@ -23,27 +23,53 @@
#include <dlfcn.h>
#include <pthread.h>
#include <sys/prctl.h>
#include <signal.h>
#include <sys/resource.h>
#if SANITIZER_FREEBSD
#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
#endif
#include <unwind.h>
#if SANITIZER_FREEBSD
#include <pthread_np.h>
#define pthread_getattr_np pthread_attr_get_np
#endif
#if SANITIZER_LINUX
#include <sys/prctl.h>
#endif
#if !SANITIZER_ANDROID
#include <elf.h>
#include <link.h>
#include <unistd.h>
#endif
namespace __sanitizer {
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
SANITIZER_WEAK_ATTRIBUTE
int __sanitizer_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
return pthread_attr_getstack((pthread_attr_t*)attr, addr, size);
extern "C" {
SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
if (real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
return pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
}
namespace __sanitizer {
SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
if (real_sigaction)
return real_sigaction(signum, act, oldact);
return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact);
}
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
static const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
CHECK(stack_top);
CHECK(stack_bottom);
if (at_initialization) {
@ -77,10 +103,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
return;
}
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
void *stackaddr = 0;
__sanitizer_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
my_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
@ -88,8 +115,6 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_bottom = (uptr)stackaddr;
}
// Does not compile for Go because dlsym() requires -ldl
#ifndef SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (f == 0)
@ -98,9 +123,8 @@ bool SetEnv(const char *name, const char *value) {
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
return IndirectExternCall(setenv_f)(name, value, 1) == 0;
}
#endif
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
@ -123,8 +147,52 @@ bool SanitizerGetThreadName(char *name, int max_len) {
#endif
}
#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
typedef struct {
uptr absolute_pc;
uptr stack_top;
uptr stack_size;
} backtrace_frame_t;
extern "C" {
typedef void *(*acquire_my_map_info_list_func)();
typedef void (*release_my_map_info_list_func)(void *map);
typedef sptr (*unwind_backtrace_signal_arch_func)(
void *siginfo, void *sigcontext, void *map_info_list,
backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
acquire_my_map_info_list_func acquire_my_map_info_list;
release_my_map_info_list_func release_my_map_info_list;
unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
} // extern "C"
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder() {
void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
if (!p) {
VReport(1,
"Failed to open libcorkscrew.so. You may see broken stack traces "
"in SEGV reports.");
return;
}
acquire_my_map_info_list =
(acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
release_my_map_info_list =
(release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
p, "unwind_backtrace_signal_arch");
if (!acquire_my_map_info_list || !release_my_map_info_list ||
!unwind_backtrace_signal_arch) {
VReport(1,
"Failed to find one of the required symbols in libcorkscrew.so. "
"You may see broken stack traces in SEGV reports.");
acquire_my_map_info_list = NULL;
unwind_backtrace_signal_arch = NULL;
release_my_map_info_list = NULL;
}
}
#endif
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
@ -161,9 +229,8 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
CHECK_GE(max_depth, 2);
size = 0;
if (max_depth == 0)
return;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
_Unwind_Backtrace(Unwind_Trace, &arg);
// We need to pop a few frames so that pc is on top.
@ -175,9 +242,35 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
trace[0] = pc;
}
#endif // !SANITIZER_GO
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) {
CHECK_GE(max_depth, 2);
if (!unwind_backtrace_signal_arch) {
SlowUnwindStack(pc, max_depth);
return;
}
void *map = acquire_my_map_info_list();
CHECK(map);
InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
// siginfo argument appears to be unused.
sptr res = unwind_backtrace_signal_arch(/* siginfo */ NULL, context, map,
frames.data(),
/* ignore_depth */ 0, max_depth);
release_my_map_info_list(map);
if (res < 0) return;
CHECK_LE((uptr)res, kStackTraceMax);
size = 0;
// +2 compensates for the libcorkscrew unwinder returning addresses of call
// instructions instead of raw return addresses.
for (sptr i = 0; i < res; ++i)
trace[size++] = frames[i].absolute_pc + 2;
}
#if !SANITIZER_FREEBSD
static uptr g_tls_size;
#endif
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
@ -186,7 +279,7 @@ static uptr g_tls_size;
#endif
void InitTlsSize() {
#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
@ -196,16 +289,12 @@ void InitTlsSize() {
CHECK_NE(get_tls, 0);
size_t tls_size = 0;
size_t tls_align = 0;
get_tls(&tls_size, &tls_align);
IndirectExternCall(get_tls)(&tls_size, &tls_align);
g_tls_size = tls_size;
#endif
#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID
}
uptr GetTlsSize() {
return g_tls_size;
}
#if defined(__x86_64__) || defined(__i386__)
#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
// sizeof(struct thread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
@ -221,7 +310,9 @@ uptr ThreadDescriptorSize() {
int minor = internal_simple_strtoll(buf + 8, &end, 10);
if (end != buf + 8 && (*end == '\0' || *end == '.')) {
/* sizeof(struct thread) values from various glibc versions. */
if (minor <= 3)
if (SANITIZER_X32)
val = 1728; // Assume only one particular version for x32.
else if (minor <= 3)
val = FIRST_32_SECOND_64(1104, 1696);
else if (minor == 4)
val = FIRST_32_SECOND_64(1120, 1728);
@ -253,27 +344,79 @@ uptr ThreadSelfOffset() {
uptr ThreadSelf() {
uptr descr_addr;
#ifdef __i386__
# if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#else
# elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#endif
# else
# error "unsupported CPU arch"
# endif
return descr_addr;
}
#endif // defined(__x86_64__) || defined(__i386__)
#endif // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
void **segbase = 0;
# if defined(__i386__)
// sysarch(I386_GET_GSBASE, segbase);
__asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
# elif defined(__x86_64__)
// sysarch(AMD64_GET_FSBASE, segbase);
__asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
# else
# error "unsupported CPU arch for FreeBSD platform"
# endif
return segbase;
}
uptr ThreadSelf() {
return (uptr)ThreadSelfSegbase()[2];
}
#endif // SANITIZER_FREEBSD
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX
# if defined(__x86_64__) || defined(__i386__)
*addr = ThreadSelf();
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
# else
*addr = 0;
*size = 0;
# endif
#elif SANITIZER_FREEBSD
void** segbase = ThreadSelfSegbase();
*addr = 0;
*size = 0;
if (segbase != 0) {
// tcbalign = 16
// tls_size = round(tls_static_space, tcbalign);
// dtv = segbase[1];
// dtv[2] = segbase - tls_static_space;
void **dtv = (void**) segbase[1];
*addr = (uptr) dtv[2];
*size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
}
#else
# error "Unknown OS"
#endif
}
uptr GetTlsSize() {
#if SANITIZER_FREEBSD
uptr addr, size;
GetTls(&addr, &size);
return size;
#else
return g_tls_size;
#endif
}
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
#ifndef SANITIZER_GO
#if defined(__x86_64__) || defined(__i386__)
*tls_addr = ThreadSelf();
*tls_size = GetTlsSize();
*tls_addr -= *tls_size;
*tls_addr += ThreadDescriptorSize();
#else
*tls_addr = 0;
*tls_size = 0;
#endif
GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
@ -289,19 +432,13 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*tls_addr = *stk_addr + *stk_size;
}
}
#else // SANITIZER_GO
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
*tls_size = 0;
#endif // SANITIZER_GO
}
void AdjustStackSizeLinux(void *attr_) {
void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
size_t stacksize = 0;
__sanitizer_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
@ -309,10 +446,11 @@ void AdjustStackSizeLinux(void *attr_) {
const uptr minstacksize = GetTlsSize() + 128*1024;
if (stacksize < minstacksize) {
if (!stack_set) {
if (common_flags()->verbosity && stacksize != 0)
Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
minstacksize);
pthread_attr_setstacksize(attr, minstacksize);
if (stacksize != 0) {
VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
minstacksize);
pthread_attr_setstacksize(attr, minstacksize);
}
} else {
Printf("Sanitizer: pre-allocated stack size is insufficient: "
"%zu < %zu\n", stacksize, minstacksize);
@ -324,10 +462,13 @@ void AdjustStackSizeLinux(void *attr_) {
#if SANITIZER_ANDROID
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
return 0;
MemoryMappingLayout memory_mapping(false);
return memory_mapping.DumpListOfModules(modules, max_modules, filter);
}
#else // SANITIZER_ANDROID
# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
# endif
struct DlIteratePhdrData {
LoadedModule *modules;
@ -378,6 +519,14 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
}
#endif // SANITIZER_ANDROID
uptr indirect_call_wrapper;
void SetIndirectCallWrapper(uptr wrapper) {
CHECK(!indirect_call_wrapper);
CHECK(wrapper);
indirect_call_wrapper = wrapper;
}
} // namespace __sanitizer
#endif // SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
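
SetIndirectCallWrapper() feeds the IndirectExternCall() helper seen in
SetEnv() and InitTlsSize(); a sketch of the presumed mechanism, assuming the
declarations in sanitizer_common.h:

// Route indirect calls into libc through an optional tool-registered
// wrapper; with no wrapper registered, the call is direct.
extern uptr indirect_call_wrapper;

template <typename F>
inline F IndirectExternCall(F f) {
  typedef F (*WrapF)(F);
  return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f;
}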

View File

@ -24,6 +24,8 @@ namespace __sanitizer {
// non-zero-initialized objects before using.
template<class Item>
struct IntrusiveList {
friend class Iterator;
void clear() {
first_ = last_ = 0;
size_ = 0;
@ -111,6 +113,21 @@ struct IntrusiveList {
}
}
class Iterator {
public:
explicit Iterator(IntrusiveList<Item> *list)
: list_(list), current_(list->first_) { }
Item *next() {
Item *ret = current_;
if (current_) current_ = current_->next;
return ret;
}
bool hasNext() const { return current_ != 0; }
private:
IntrusiveList<Item> *list_;
Item *current_;
};
// private, don't use directly.
uptr size_;
Item *first_;

View File

@ -5,9 +5,8 @@
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements mac-specific functions from
// sanitizer_libc.h.
// This file is shared between various sanitizers' runtime libraries and
// implements OSX-specific functions.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
@ -21,20 +20,22 @@
#include <stdio.h>
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mac.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include <crt_externs.h> // for _NSGetEnviron
#include <fcntl.h>
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
@ -118,6 +119,16 @@ uptr internal_getpid() {
return getpid();
}
int internal_sigaction(int signum, const void *act, void *oldact) {
return sigaction(signum,
(struct sigaction *)act, (struct sigaction *)oldact);
}
int internal_fork() {
// TODO(glider): this may call user's pthread_atfork() handlers which is bad.
return fork();
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@ -136,6 +147,20 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK(stack_top);
CHECK(stack_bottom);
uptr stacksize = pthread_get_stacksize_np(pthread_self());
// pthread_get_stacksize_np() returns an incorrect stack size for the main
// thread on Mavericks. See
// https://code.google.com/p/address-sanitizer/issues/detail?id=261
if ((GetMacosVersion() == MACOS_VERSION_MAVERICKS) && at_initialization &&
stacksize == (1 << 19)) {
struct rlimit rl;
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Most often rl.rlim_cur will be the desired 8M.
if (rl.rlim_cur < kMaxThreadStackSize) {
stacksize = rl.rlim_cur;
} else {
stacksize = kMaxThreadStackSize;
}
}
void *stackaddr = pthread_get_stackaddr_np(pthread_self());
*stack_top = (uptr)stackaddr;
*stack_bottom = *stack_top - stacksize;
@ -169,7 +194,8 @@ void ReExec() {
UNIMPLEMENTED();
}
void PrepareForSandboxing() {
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
(void)args;
// Nothing here for now.
}
@ -177,148 +203,6 @@ uptr GetPageSize() {
return sysconf(_SC_PAGESIZE);
}
// ----------------- sanitizer_procmaps.h
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
}
MemoryMappingLayout::~MemoryMappingLayout() {
}
// More information about Mach-O headers can be found in mach-o/loader.h
// Each Mach-O image has a header (mach_header or mach_header_64) starting with
// a magic number, and a list of linker load commands directly following the
// header.
// A load command is at least two 32-bit words: the command type and the
// command size in bytes. We're interested only in segment load commands
// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
// into the task's address space.
// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
// segment_command_64 correspond to the memory address, memory size and the
// file offset of the current memory segment.
// Because these fields are taken from the images as is, one needs to add
// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
void MemoryMappingLayout::Reset() {
// Count down from the top.
// TODO(glider): as per man 3 dyld, iterating over the headers with
// _dyld_image_count is thread-unsafe. We need to register callbacks for
// adding and removing images which will invalidate the MemoryMappingLayout
// state.
current_image_ = _dyld_image_count();
current_load_cmd_count_ = -1;
current_load_cmd_addr_ = 0;
current_magic_ = 0;
current_filetype_ = 0;
}
// static
void MemoryMappingLayout::CacheMemoryMappings() {
// No-op on Mac for now.
}
void MemoryMappingLayout::LoadFromCache() {
// No-op on Mac for now.
}
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
// Google Perftools, http://code.google.com/p/google-perftools.
// NextSegmentLoad scans the current image for the next segment load command
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
template<u32 kLCSegment, typename SegmentCommand>
bool MemoryMappingLayout::NextSegmentLoad(
uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size, uptr *protection) {
if (protection)
UNIMPLEMENTED();
const char* lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
const SegmentCommand* sc = (const SegmentCommand *)lc;
if (start) *start = sc->vmaddr + dlloff;
if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
if (offset) {
if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
*offset = sc->vmaddr;
} else {
*offset = sc->fileoff;
}
}
if (filename) {
internal_strncpy(filename, _dyld_get_image_name(current_image_),
filename_size);
}
return true;
}
return false;
}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
uptr *protection) {
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
if (current_load_cmd_count_ < 0) {
// Set up for this image.
current_load_cmd_count_ = hdr->ncmds;
current_magic_ = hdr->magic;
current_filetype_ = hdr->filetype;
switch (current_magic_) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
break;
}
#endif
case MH_MAGIC: {
current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
break;
}
default: {
continue;
}
}
}
for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
switch (current_magic_) {
// current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
start, end, offset, filename, filename_size, protection))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
start, end, offset, filename, filename_size, protection))
return true;
break;
}
}
}
// If we get here, none of the remaining load commands in this image
// describe segments. Go on to the next image.
}
return false;
}
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[],
uptr filename_size,
uptr *protection) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
protection);
}
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
@ -377,32 +261,49 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
MemoryMappingLayout memory_mapping(false);
memory_mapping.Reset();
uptr cur_beg, cur_end, cur_offset;
InternalScopedBuffer<char> module_name(kMaxPathLength);
uptr n_modules = 0;
for (uptr i = 0;
n_modules < max_modules &&
memory_mapping.Next(&cur_beg, &cur_end, &cur_offset,
module_name.data(), module_name.size(), 0);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
continue;
if (filter && !filter(cur_name))
continue;
LoadedModule *cur_module = 0;
if (n_modules > 0 &&
0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
cur_module = &modules[n_modules - 1];
} else {
void *mem = &modules[n_modules];
cur_module = new(mem) LoadedModule(cur_name, cur_beg);
n_modules++;
return memory_mapping.DumpListOfModules(modules, max_modules, filter);
}
bool IsDeadlySignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
MacosVersion GetMacosVersionInternal() {
int mib[2] = { CTL_KERN, KERN_OSRELEASE };
char version[100];
uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
// Get the version length.
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
CHECK_LT(len, maxlen);
CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
switch (version[0]) {
case '9': return MACOS_VERSION_LEOPARD;
case '1': {
switch (version[1]) {
case '0': return MACOS_VERSION_SNOW_LEOPARD;
case '1': return MACOS_VERSION_LION;
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
default: return MACOS_VERSION_UNKNOWN;
}
}
cur_module->addAddressRange(cur_beg, cur_end);
default: return MACOS_VERSION_UNKNOWN;
}
return n_modules;
}
MacosVersion GetMacosVersion() {
atomic_uint32_t *cache =
reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
MacosVersion result =
static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
if (result == MACOS_VERSION_UNINITIALIZED) {
result = GetMacosVersionInternal();
atomic_store(cache, result, memory_order_release);
}
return result;
}
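GetMacosVersion() caches the parsed kern.osrelease value in an atomic, so the sysctl runs at most once per process (a racing first call may run GetMacosVersionInternal() twice, but both stores write the same value). A sketch of a version-gated caller, mirroring the Mavericks stack-size workaround in GetThreadStackTopAndBottom() above (the function name is illustrative):
static void MaybeApplyMavericksWorkaround() {
  if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) {
    // e.g. distrust pthread_get_stacksize_np() for the main thread,
    // as done in GetThreadStackTopAndBottom() above.
  }
}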
} // namespace __sanitizer

View File

@ -0,0 +1,34 @@
//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries and
// provides definitions for OSX-specific functions.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_MAC_H
#define SANITIZER_MAC_H
#include "sanitizer_platform.h"
#if SANITIZER_MAC
namespace __sanitizer {
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
MACOS_VERSION_LEOPARD,
MACOS_VERSION_SNOW_LEOPARD,
MACOS_VERSION_LION,
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS
};
MacosVersion GetMacosVersion();
} // namespace __sanitizer
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H

View File

@ -81,6 +81,88 @@ class BlockingMutex {
uptr owner_; // for debugging
};
// Reader-writer spin mutex.
class RWMutex {
public:
RWMutex() {
atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
~RWMutex() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
void Lock() {
u32 cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
memory_order_acquire))
return;
LockSlow();
}
void Unlock() {
u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
DCHECK_NE(prev & kWriteLock, 0);
(void)prev;
}
void ReadLock() {
u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
ReadLockSlow();
}
void ReadUnlock() {
u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
(void)prev;
}
void CheckLocked() {
CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
private:
atomic_uint32_t state_;
enum {
kUnlocked = 0,
kWriteLock = 1,
kReadLock = 2
};
void NOINLINE LockSlow() {
for (int i = 0;; i++) {
if (i < 10)
proc_yield(10);
else
internal_sched_yield();
u32 cmp = atomic_load(&state_, memory_order_relaxed);
if (cmp == kUnlocked &&
atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire))
return;
}
}
void NOINLINE ReadLockSlow() {
for (int i = 0;; i++) {
if (i < 10)
proc_yield(10);
else
internal_sched_yield();
u32 prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
}
}
RWMutex(const RWMutex&);
void operator = (const RWMutex&);
};
template<typename MutexType>
class GenericScopedLock {
public:
@ -121,6 +203,8 @@ class GenericScopedReadLock {
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
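A usage sketch for the new RWMutex through the scoped-lock typedefs above (the guarded value is illustrative; this assumes GenericScopedLock takes a pointer to the mutex, as elsewhere in this header):
RWMutex example_mu;
int example_value;  // illustrative shared state

int ReadExample() {
  RWMutexReadLock l(&example_mu);  // shared: concurrent readers allowed
  return example_value;
}

void WriteExample(int v) {
  RWMutexLock l(&example_mu);  // exclusive: excludes readers and writers
  example_value = v;
}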
} // namespace __sanitizer

View File

@ -11,7 +11,8 @@
#ifndef SANITIZER_PLATFORM_H
#define SANITIZER_PLATFORM_H
#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
#if !defined(__linux__) && !defined(__FreeBSD__) && \
!defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported"
#endif
@ -21,6 +22,12 @@
# define SANITIZER_LINUX 0
#endif
#if defined(__FreeBSD__)
# define SANITIZER_FREEBSD 1
#else
# define SANITIZER_FREEBSD 0
#endif
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
@ -46,6 +53,58 @@
# define SANITIZER_ANDROID 0
#endif
#define SANITIZER_POSIX (SANITIZER_LINUX || SANITIZER_MAC)
#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
#else
# define SANITIZER_WORDSIZE 32
#endif
#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
#endif
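FIRST_32_SECOND_64 picks its first argument on 32-bit targets and its second on 64-bit ones; a one-line illustration (the constant name is made up):
// (1ULL << 30) on 32-bit builds, (1ULL << 40) on 64-bit builds.
static const unsigned long long kExampleSpaceSize =
    FIRST_32_SECOND_64(1ULL << 30, 1ULL << 40);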
#if defined(__x86_64__) && !defined(_LP64)
# define SANITIZER_X32 1
#else
# define SANITIZER_X32 0
#endif
// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fall back to SizeClassAllocator32.
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__aarch64__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
# endif
#endif
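The escape hatch the comment mentions is an ordinary preprocessor define; a minimal sketch of how a build or a consumer can act on it (the EXAMPLE_* macro is made up):
// Force the 32-bit allocator even on a 64-bit target by building with
// -DSANITIZER_CAN_USE_ALLOCATOR64=0.
#if SANITIZER_CAN_USE_ALLOCATOR64
# define EXAMPLE_PRIMARY_ALLOCATOR_IS_64BIT 1
#else
# define EXAMPLE_PRIMARY_ALLOCATOR_IS_64BIT 0
#endif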
// The range of addresses which can be returned by mmap.
// FIXME: this value should be different on different platforms,
// e.g. on AArch64 it is most likely (1ULL << 39). Larger values will still work
// but will consume more memory for TwoLevelByteMap.
#if defined(__aarch64__)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 39)
#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
// The AArch64 linux port uses the canonical syscall set as mandated by
// the upstream linux community for all new ports. Other ports may still
// use legacy syscalls.
#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
# if defined(__aarch64__) && SANITIZER_LINUX
# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
# else
# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
# endif
#endif
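In practice the macro matters because new ports ship only the canonical syscall numbers (e.g. __NR_openat but no __NR_open on aarch64). A self-contained sketch using the libc syscall(2) wrapper (my_open is an illustrative name; the in-tree code uses its own internal_syscall machinery instead):
#include <fcntl.h>        // AT_FDCWD
#include <sys/syscall.h>  // __NR_open / __NR_openat
#include <unistd.h>       // syscall()

long my_open(const char *path, int flags) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
  // Canonical-only kernels: open() must go through openat().
  return syscall(__NR_openat, AT_FDCWD, path, flags);
#else
  return syscall(__NR_open, path, flags);
#endif
}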
#endif // SANITIZER_PLATFORM_H

View File

@ -45,14 +45,16 @@
# define SI_IOS 0
#endif
# define SANITIZER_INTERCEPT_STRCMP 1
# define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STRCMP 1
#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MEMCHR 1
#define SANITIZER_INTERCEPT_MEMRCHR SI_LINUX
# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
@ -65,109 +67,139 @@
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PRCTL SI_LINUX
#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
#define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
# define SANITIZER_INTERCEPT_FREXP 1
# define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
#ifndef SANITIZER_INTERCEPT_PRINTF
# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX
#endif
# define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
# define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
# define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
# define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
# define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined (__x86_64)) // NOLINT
# define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
# define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
# define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_WORDEXP SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
# define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_SHMCTL \
(SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
# define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
#define SANITIZER_INTERCEPT_FREXP 1
#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
SI_MAC || SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_SINCOS SI_LINUX
# define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
# define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWENT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SETPWENT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined (__x86_64)) // NOLINT
#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WORDEXP (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SHMCTL \
(SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
// FIXME: getline seems to be available on OSX 10.7
# define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
# define SANITIZER_INTERCEPT__EXIT SI_LINUX
#define SANITIZER_INTERCEPT__EXIT SI_LINUX
# define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_COND SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
#define SANITIZER_INTERCEPT_GETIFADDRS SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_IF_INDEXTONAME SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_AEABI_MEM SI_LINUX && defined(__arm__)
#define SANITIZER_INTERCEPT___BZERO SI_MAC
#define SANITIZER_INTERCEPT_FTIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H

View File

@ -62,7 +62,7 @@ namespace __sanitizer {
unsigned struct_statfs64_sz = sizeof(struct statfs64);
} // namespace __sanitizer
#if !defined(__powerpc64__) && !defined(__x86_64__)
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif

Some files were not shown because too many files have changed in this diff.