libsanitizer merge from upstream r191666

This may break gcc-asan on Mac; I will follow up separately.

From-SVN: r204368
Kostya Serebryany <kcc@google.com> committed on 2013-11-04 21:33:31 +00:00
parent fd5564d3c7
commit ef1b3fda32
171 changed files with 21186 additions and 4909 deletions

gcc/ChangeLog

@@ -1,3 +1,17 @@
2013-11-04 Kostya Serebryany <kcc@google.com>
Update to match the changed asan API.
* asan.c (asan_function_start): New function.
(asan_emit_stack_protection): Update the string stored in the
stack red zone to match new API. Store the PC of the current
function in the red zone.
(asan_global_struct): Update the __asan_global definition to match
the new API.
(asan_add_global): Ditto.
* asan.h (asan_function_start): New prototype.
* final.c (final_start_function): Call asan_function_start.
* sanitizer.def (BUILT_IN_ASAN_INIT): Rename __asan_init_v1 to __asan_init_v3.
2013-11-04 Wei Mi <wmi@google.com>
* gcc/config/i386/i386-c.c (ix86_target_macros_internal): Separate

gcc/asan.c

@@ -59,11 +59,13 @@ along with GCC; see the file COPYING3. If not see
if ((X & 7) + N - 1 >= ShadowValue)
__asan_report_loadN(X);
Stores are instrumented similarly, but using __asan_report_storeN functions.
A call to __asan_init() is inserted into the list of module CTORs.
A call to __asan_init_vN() is inserted into the list of module CTORs.
N is the version number of the AddressSanitizer API. The changes between the
API versions are listed in libsanitizer/asan/asan_interface_internal.h.
The run-time library redefines malloc (so that redzones are inserted around
the allocated memory) and free (so that reuse of freed memory is delayed),
provides __asan_report* and __asan_init functions.
provides __asan_report* and __asan_init_vN functions.
Read more:
http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
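
As a concrete sketch, the check emitted for a 4-byte load is equivalent to the following C (the shadow-offset parameter and the helper names are illustrative assumptions, not the exact RTL GCC expands to):

#include <stdint.h>

extern void __asan_report_load4 (uintptr_t x);  /* provided by libasan */

static void
check_load4 (uintptr_t x, uintptr_t shadow_offset)
{
  signed char shadow_value = *(signed char *) ((x >> 3) + shadow_offset);
  /* 0 means all 8 bytes of this shadow granule are addressable.  */
  if (shadow_value != 0
      && (int) (x & 7) + 4 - 1 >= shadow_value)
    __asan_report_load4 (x);  /* redzone or partially addressable granule */
}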
@@ -125,9 +127,11 @@ along with GCC; see the file COPYING3. If not see
where '(...){n}' means the content inside the parentheses occurs 'n'
times, with 'n' being the number of variables on the stack.
3/ The following 8 bytes contain the PC of the current function which
will be used by the run-time library to print an error message.
3/ The following 16 bytes of the red zone have no particular
format.
4/ The following 8 bytes are reserved for internal use by the run-time.
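
Taken together, the bottom 32 bytes of an instrumented frame now carry the following words (a sketch for a 64-bit target; the struct and field names are illustrative, GCC emits the four words directly):

struct stack_frame_redzone_header
{
  uint64_t magic;       /* ASAN_STACK_FRAME_MAGIC */
  const char *descr;    /* frame description string described above */
  const void *pc;       /* address of this function's LASANPC label */
  uint64_t reserved;    /* internal use by the run-time */
};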
The shadow memory for that stack layout is going to look like this:
@@ -205,6 +209,9 @@ along with GCC; see the file COPYING3. If not see
// Name of the global variable.
const void *__name;
// Name of the module where the global variable is declared.
const void *__module_name;
// This is always set to NULL for now.
uptr __has_dynamic_init;
}
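
For reference, the complete v3 record the run time expects, reconstructed from the fields that asan_global_struct builds below (uptr being the run time's pointer-sized unsigned integer):

struct __asan_global
{
  uptr __beg;                 /* address of the global */
  uptr __size;                /* size of the original global */
  uptr __size_with_redzone;   /* size including the redzone */
  const void *__name;         /* name of the global variable */
  const void *__module_name;  /* name of the module declaring it */
  uptr __has_dynamic_init;    /* always set to 0 by GCC for now */
};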
@@ -914,6 +921,15 @@ asan_clear_shadow (rtx shadow_mem, HOST_WIDE_INT len)
add_int_reg_note (jump, REG_BR_PROB, REG_BR_PROB_BASE * 80 / 100);
}
void
asan_function_start (void)
{
section *fnsec = function_section (current_function_decl);
switch_to_section (fnsec);
ASM_OUTPUT_DEBUG_LABEL (asm_out_file, "LASANPC",
current_function_funcdef_no);
}
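/* On a typical ELF target the net effect is one local label at the very top
   of each instrumented function, e.g. ".LASANPC0:" for funcdef_no == 0 (the
   exact spelling depends on the target's local-label prefix and
   ASM_OUTPUT_DEBUG_LABEL).  asan_emit_stack_protection below stores the
   label's address in the frame red zone so the run-time library can report
   which function owns the frame.  */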
/* Insert code to protect stack vars. The prologue sequence should be emitted
directly, epilogue sequence returned. BASE is the register holding the
stack base, against which OFFSETS array offsets are relative to, OFFSETS
@@ -929,12 +945,13 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls,
int length)
{
rtx shadow_base, shadow_mem, ret, mem;
char buf[30];
unsigned char shadow_bytes[4];
HOST_WIDE_INT base_offset = offsets[length - 1], offset, prev_offset;
HOST_WIDE_INT last_offset, last_size;
int l;
unsigned char cur_shadow_byte = ASAN_STACK_MAGIC_LEFT;
tree str_cst;
tree str_cst, decl, id;
if (shadow_ptr_types[0] == NULL_TREE)
asan_init_shadow_ptr_types ();
@@ -942,11 +959,6 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls,
/* First of all, prepare the description string. */
pretty_printer asan_pp;
if (DECL_NAME (current_function_decl))
pp_tree_identifier (&asan_pp, DECL_NAME (current_function_decl));
else
pp_string (&asan_pp, "<unknown>");
pp_space (&asan_pp);
pp_decimal_int (&asan_pp, length / 2 - 1);
pp_space (&asan_pp);
for (l = length - 2; l; l -= 2)
@@ -976,6 +988,20 @@ asan_emit_stack_protection (rtx base, HOST_WIDE_INT *offsets, tree *decls,
emit_move_insn (mem, gen_int_mode (ASAN_STACK_FRAME_MAGIC, ptr_mode));
mem = adjust_address (mem, VOIDmode, GET_MODE_SIZE (ptr_mode));
emit_move_insn (mem, expand_normal (str_cst));
mem = adjust_address (mem, VOIDmode, GET_MODE_SIZE (ptr_mode));
ASM_GENERATE_INTERNAL_LABEL (buf, "LASANPC", current_function_funcdef_no);
id = get_identifier (buf);
decl = build_decl (DECL_SOURCE_LOCATION (current_function_decl),
VAR_DECL, id, char_type_node);
SET_DECL_ASSEMBLER_NAME (decl, id);
TREE_ADDRESSABLE (decl) = 1;
TREE_READONLY (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
TREE_STATIC (decl) = 1;
TREE_PUBLIC (decl) = 0;
TREE_USED (decl) = 1;
emit_move_insn (mem, expand_normal (build_fold_addr_expr (decl)));
shadow_base = expand_binop (Pmode, lshr_optab, base,
GEN_INT (ASAN_SHADOW_SHIFT),
NULL_RTX, 1, OPTAB_DIRECT);
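/* shadow_base now holds base >> ASAN_SHADOW_SHIFT, i.e. the frame base
   mapped toward its shadow address; it is used to poison the frame's
   red-zone shadow bytes.  */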
@@ -1924,20 +1950,21 @@ transform_statements (void)
uptr __size;
uptr __size_with_redzone;
const void *__name;
const void *__module_name;
uptr __has_dynamic_init;
} type. */
static tree
asan_global_struct (void)
{
static const char *field_names[5]
static const char *field_names[6]
= { "__beg", "__size", "__size_with_redzone",
"__name", "__has_dynamic_init" };
tree fields[5], ret;
"__name", "__module_name", "__has_dynamic_init" };
tree fields[6], ret;
int i;
ret = make_node (RECORD_TYPE);
for (i = 0; i < 5; i++)
for (i = 0; i < 6; i++)
{
fields[i]
= build_decl (UNKNOWN_LOCATION, FIELD_DECL,
@@ -1962,21 +1989,20 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
{
tree init, uptr = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (type)));
unsigned HOST_WIDE_INT size;
tree str_cst, refdecl = decl;
tree str_cst, module_name_cst, refdecl = decl;
vec<constructor_elt, va_gc> *vinner = NULL;
pretty_printer asan_pp;
pretty_printer asan_pp, module_name_pp;
if (DECL_NAME (decl))
pp_tree_identifier (&asan_pp, DECL_NAME (decl));
else
pp_string (&asan_pp, "<unknown>");
pp_space (&asan_pp);
pp_left_paren (&asan_pp);
pp_string (&asan_pp, main_input_filename);
pp_right_paren (&asan_pp);
str_cst = asan_pp_string (&asan_pp);
pp_string (&module_name_pp, main_input_filename);
module_name_cst = asan_pp_string (&module_name_pp);
if (asan_needs_local_alias (decl))
{
char buf[20];
@@ -2004,6 +2030,8 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
fold_convert (const_ptr_type_node, str_cst));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
fold_convert (const_ptr_type_node, module_name_cst));
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, 0));
init = build_constructor (type, vinner);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
@@ -2158,7 +2186,7 @@ add_string_csts (void **slot, void *data)
static GTY(()) tree asan_ctor_statements;
/* Module-level instrumentation.
- Insert __asan_init() into the list of CTORs.
- Insert __asan_init_vN() into the list of CTORs.
- TODO: insert redzones around globals.
*/
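/* Schematically, the constructor this pass appends now boils down to the
   following C-level sketch (the real body is synthesized in GIMPLE, and
   the constructor's name here is illustrative):

     extern void __asan_init_v3 (void);  // BUILT_IN_ASAN_INIT after this change
     static void _sub_I_00099_asan (void) __attribute__ ((constructor));
     static void
     _sub_I_00099_asan (void)
     {
       __asan_init_v3 ();
       // ...registration of this module's instrumented globals...
     }
*/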

gcc/asan.h

@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef TREE_ASAN
#define TREE_ASAN
extern void asan_function_start (void);
extern void asan_finish_file (void);
extern rtx asan_emit_stack_protection (rtx, HOST_WIDE_INT *, tree *, int);
extern bool asan_protect_global (tree);

gcc/final.c

@@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see
#include "cfgloop.h"
#include "params.h"
#include "tree-pretty-print.h" /* for dump_function_header */
#include "asan.h"
#ifdef XCOFF_DEBUGGING_INFO
#include "xcoffout.h" /* Needed for external data
@@ -1738,6 +1739,9 @@ final_start_function (rtx first, FILE *file,
high_block_linenum = high_function_linenum = last_linenum;
if (flag_sanitize & SANITIZE_ADDRESS)
asan_function_start ();
if (!DECL_IGNORED_P (current_function_decl))
debug_hooks->begin_prologue (last_linenum, last_filename);

gcc/sanitizer.def

@@ -27,7 +27,7 @@ along with GCC; see the file COPYING3. If not see
for other FEs by asan.c. */
/* Address Sanitizer */
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v1",
DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v3",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
/* Do not reorder the BUILT_IN_ASAN_REPORT* builtins, e.g. cfgcleanup.c
relies on this order. */

gcc/testsuite/ChangeLog

@@ -1,3 +1,9 @@
2013-11-04 Kostya Serebryany <kcc@google.com>
* g++.dg/asan/asan_test.cc: Update the test
to match the fresh asan run-time.
* c-c++-common/asan/stack-overflow-1.c: Ditto.
2013-11-04 Ian Lance Taylor <iant@google.com>
* g++.dg/ext/sync-4.C: New test.

gcc/testsuite/c-c++-common/asan/stack-overflow-1.c

@@ -19,4 +19,5 @@ int main() {
/* { dg-output "READ of size 1 at 0x\[0-9a-f\]+ thread T0\[^\n\r]*(\n|\r\n|\r)" } */
/* { dg-output " #0 0x\[0-9a-f\]+ (in _*main (\[^\n\r]*stack-overflow-1.c:16|\[^\n\r]*:0)|\[(\]).*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is\[^\n\r]*frame <main>" } */
/* { dg-output "\[^\n\r]*Address 0x\[0-9a-f\]+ is located in stack of thread T0.*(\n|\r\n|\r)" } */
/* { dg-output "\[^\n\r]*in main.*stack-overflow-1.c.*(\n|\r\n|\r)" } */

gcc/testsuite/g++.dg/asan/asan_test.cc

@@ -204,16 +204,6 @@ TEST(AddressSanitizer, BitFieldNegativeTest) {
delete Ident(x);
}
TEST(AddressSanitizer, OutOfMemoryTest) {
size_t size = SANITIZER_WORDSIZE == 64 ? (size_t)(1ULL << 48) : (0xf0000000);
EXPECT_EQ(0, realloc(0, size));
EXPECT_EQ(0, realloc(0, ~Ident(0)));
EXPECT_EQ(0, malloc(size));
EXPECT_EQ(0, malloc(~Ident(0)));
EXPECT_EQ(0, calloc(1, size));
EXPECT_EQ(0, calloc(1, ~Ident(0)));
}
#if ASAN_NEEDS_SEGV
namespace {
@@ -497,42 +487,6 @@ TEST(AddressSanitizer, ManyStackObjectsTest) {
EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ");
}
NOINLINE static void Frame0(int frame, char *a, char *b, char *c) {
char d[4] = {0};
char *D = Ident(d);
switch (frame) {
case 3: a[5]++; break;
case 2: b[5]++; break;
case 1: c[5]++; break;
case 0: D[5]++; break;
}
}
NOINLINE static void Frame1(int frame, char *a, char *b) {
char c[4] = {0}; Frame0(frame, a, b, c);
break_optimization(0);
}
NOINLINE static void Frame2(int frame, char *a) {
char b[4] = {0}; Frame1(frame, a, b);
break_optimization(0);
}
NOINLINE static void Frame3(int frame) {
char a[4] = {0}; Frame2(frame, a);
break_optimization(0);
}
TEST(AddressSanitizer, GuiltyStackFrame0Test) {
EXPECT_DEATH(Frame3(0), "located .*in frame <.*Frame0");
}
TEST(AddressSanitizer, GuiltyStackFrame1Test) {
EXPECT_DEATH(Frame3(1), "located .*in frame <.*Frame1");
}
TEST(AddressSanitizer, GuiltyStackFrame2Test) {
EXPECT_DEATH(Frame3(2), "located .*in frame <.*Frame2");
}
TEST(AddressSanitizer, GuiltyStackFrame3Test) {
EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3");
}
NOINLINE void LongJmpFunc1(jmp_buf buf) {
// create three red zones for these two stack objects.
int a;

libsanitizer/ChangeLog

@@ -1,3 +1,19 @@
2013-11-04 Kostya Serebryany <kcc@google.com>
* All source files: Merge from upstream r191666.
* merge.sh: Added lsan.
* configure.ac (AC_CONFIG_FILES): Added lsan.
* Makefile.am (SUBDIRS): Added lsan.
* sanitizer_common/Makefile.am (sanitizer_common_files): Added new files.
* asan/Makefile.am (asan_files): Added new files.
(libasan_la_LIBADD): Added a dependency on lsan.
* lsan/Makefile.am: New file.
* asan/Makefile.in: Regenerate.
* lsan/Makefile.in: Regenerate.
* Makefile.in: Regenerate.
* configure: Regenerate.
* sanitizer_common/Makefile.in: Regenerate.
2013-09-20 Alan Modra <amodra@gmail.com>
* configure: Regenerate.

libsanitizer/MERGE

@@ -1,4 +1,4 @@
175733
191666
The first line of this file holds the svn revision number of the
last merge done from the master library sources.

libsanitizer/Makefile.am

@@ -1,13 +1,13 @@
ACLOCAL_AMFLAGS = -I .. -I ../config
if TSAN_SUPPORTED
SUBDIRS = interception sanitizer_common asan tsan ubsan
SUBDIRS = interception sanitizer_common lsan asan tsan ubsan
else
SUBDIRS = interception sanitizer_common asan ubsan
SUBDIRS = interception sanitizer_common lsan asan ubsan
endif
if USING_MAC_INTERPOSE
SUBDIRS = sanitizer_common asan ubsan
SUBDIRS = sanitizer_common lsan asan ubsan
endif
# Work around what appears to be a GNU make bug handling MAKEFLAGS

libsanitizer/Makefile.in

@@ -76,7 +76,7 @@ AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = interception sanitizer_common asan ubsan tsan
DIST_SUBDIRS = interception sanitizer_common lsan asan ubsan tsan
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AR = @AR@
@@ -209,9 +209,9 @@ top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
ACLOCAL_AMFLAGS = -I .. -I ../config
@TSAN_SUPPORTED_FALSE@SUBDIRS = interception sanitizer_common asan ubsan
@TSAN_SUPPORTED_TRUE@SUBDIRS = interception sanitizer_common asan tsan ubsan
@USING_MAC_INTERPOSE_TRUE@SUBDIRS = sanitizer_common asan ubsan
@TSAN_SUPPORTED_FALSE@SUBDIRS = interception sanitizer_common lsan asan ubsan
@TSAN_SUPPORTED_TRUE@SUBDIRS = interception sanitizer_common lsan asan tsan ubsan
@USING_MAC_INTERPOSE_TRUE@SUBDIRS = sanitizer_common lsan asan ubsan
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and

libsanitizer/asan/Makefile.am

@@ -15,32 +15,31 @@ toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_allocator.cc \
asan_allocator2.cc \
asan_interceptors.cc \
asan_mac.cc \
asan_malloc_mac.cc \
asan_new_delete.cc \
asan_posix.cc \
asan_rtl.cc \
asan_stats.cc \
asan_thread_registry.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
asan_mac.cc \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
asan_report.cc \
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
asan_win.cc
libasan_la_SOURCES = $(asan_files)
if USING_MAC_INTERPOSE
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la
else
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la $(top_builddir)/interception/libinterception.la
endif
libasan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)

libsanitizer/asan/Makefile.in

@@ -81,17 +81,18 @@ am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
@USING_MAC_INTERPOSE_FALSE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/interception/libinterception.la \
@USING_MAC_INTERPOSE_FALSE@ $(am__DEPENDENCIES_1)
@USING_MAC_INTERPOSE_TRUE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_TRUE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_TRUE@ $(am__DEPENDENCIES_1)
am__objects_1 = asan_allocator.lo asan_allocator2.lo \
asan_interceptors.lo asan_mac.lo asan_malloc_mac.lo \
asan_new_delete.lo asan_posix.lo asan_rtl.lo asan_stats.lo \
asan_thread_registry.lo asan_fake_stack.lo asan_globals.lo \
asan_linux.lo asan_malloc_linux.lo asan_malloc_win.lo \
asan_poisoning.lo asan_report.lo asan_stack.lo asan_thread.lo \
asan_win.lo
am__objects_1 = asan_allocator2.lo asan_dll_thunk.lo \
asan_fake_stack.lo asan_globals.lo asan_interceptors.lo \
asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
asan_malloc_mac.lo asan_malloc_win.lo asan_new_delete.lo \
asan_poisoning.lo asan_posix.lo asan_report.lo asan_rtl.lo \
asan_stack.lo asan_stats.lo asan_thread.lo asan_win.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -260,32 +261,33 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \
asan_allocator.cc \
asan_allocator2.cc \
asan_interceptors.cc \
asan_mac.cc \
asan_malloc_mac.cc \
asan_new_delete.cc \
asan_posix.cc \
asan_rtl.cc \
asan_stats.cc \
asan_thread_registry.cc \
asan_dll_thunk.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
asan_mac.cc \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
asan_report.cc \
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
asan_win.cc
libasan_la_SOURCES = $(asan_files)
@USING_MAC_INTERPOSE_FALSE@libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_FALSE@ $(top_builddir)/interception/libinterception.la \
@USING_MAC_INTERPOSE_FALSE@ $(LIBSTDCXX_RAW_CXX_LDFLAGS)
@USING_MAC_INTERPOSE_TRUE@libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
@USING_MAC_INTERPOSE_TRUE@ $(top_builddir)/lsan/libsanitizer_lsan.la \
@USING_MAC_INTERPOSE_TRUE@ $(LIBSTDCXX_RAW_CXX_LDFLAGS)
libasan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` -lpthread -ldl
@@ -402,8 +404,8 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
@@ -420,7 +422,6 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
.cc.o:

libsanitizer/asan/asan_allocator.cc (deleted)

@@ -1,811 +0,0 @@
//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to a quarantine (fifo list).
// After quarantine, a chunk is returned to freelists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 1
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
#define REDZONE ((uptr)(flags()->redzone))
static const uptr kMinAllocSize = REDZONE * 2;
static const u64 kMaxAvailableRam = 128ULL << 30; // 128G
static const uptr kMaxThreadLocalQuarantine = 1 << 20; // 1M
static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const uptr kMaxSizeForThreadLocalFreeList =
(ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;
// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
static const uptr kMaxAllowedMallocSize =
(SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
static inline uptr SizeClassToSize(u8 size_class) {
CHECK(size_class < kNumberOfSizeClasses);
if (size_class <= kMallocSizeClassStepLog) {
return 1UL << size_class;
} else {
return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
}
}
static inline u8 SizeToSizeClass(uptr size) {
u8 res = 0;
if (size <= kMallocSizeClassStep) {
uptr rounded = RoundUpToPowerOfTwo(size);
res = Log2(rounded);
} else {
res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
+ kMallocSizeClassStepLog;
}
CHECK(res < kNumberOfSizeClasses);
CHECK(size <= SizeClassToSize(res));
return res;
}
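// A few values of this mapping, worked from the definitions above
// (kMallocSizeClassStepLog == 26, so sizes up to 64M get power-of-two
// classes and larger sizes are rounded up to multiples of 64M):
//   SizeToSizeClass(1)         ==  0  ->  SizeClassToSize(0)  == 1
//   SizeToSizeClass(100)       ==  7  ->  SizeClassToSize(7)  == 128
//   SizeToSizeClass(100 << 20) == 28  ->  SizeClassToSize(28) == 128M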
// Given REDZONE bytes, we need to mark the first size bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
CHECK(size <= REDZONE);
CHECK(IsAligned(mem, REDZONE));
CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
CHECK(IsPowerOfTwo(REDZONE));
CHECK(REDZONE >= SHADOW_GRANULARITY);
PoisonShadowPartialRightRedzone(mem, size, REDZONE,
kAsanHeapRightRedzoneMagic);
}
static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
CHECK(IsAligned(size, GetPageSizeCached()));
u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
if (flags()->debug) {
Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
}
return res;
}
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case the actual chunk resides at
// this - this->used_size).
//
// The magic numbers for the enum values are taken randomly.
enum {
CHUNK_AVAILABLE = 0x57,
CHUNK_ALLOCATED = 0x32,
CHUNK_QUARANTINE = 0x19,
CHUNK_MEMALIGN = 0xDC
};
struct ChunkBase {
// First 8 bytes.
uptr chunk_state : 8;
uptr alloc_tid : 24;
uptr size_class : 8;
uptr free_tid : 24;
// Second 8 bytes.
uptr alignment_log : 8;
uptr alloc_type : 2;
uptr used_size : FIRST_32_SECOND_64(32, 54); // Size requested by the user.
// This field may overlap with the user area and thus should not
// be used while the chunk is in CHUNK_ALLOCATED state.
AsanChunk *next;
// Typically the beginning of the user-accessible memory is 'this'+REDZONE
// and is also aligned by REDZONE. However, if the memory is allocated
// by memalign, the alignment might be higher and the user-accessible memory
// starts at the first properly aligned address after 'this'.
uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
uptr Size() { return SizeClassToSize(size_class); }
u8 SizeClass() { return size_class; }
};
struct AsanChunk: public ChunkBase {
u32 *compressed_alloc_stack() {
return (u32*)((uptr)this + sizeof(ChunkBase));
}
u32 *compressed_free_stack() {
return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
}
// The left redzone after the ChunkBase is given to the alloc stack trace.
uptr compressed_alloc_stack_size() {
if (REDZONE < sizeof(ChunkBase)) return 0;
return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
}
uptr compressed_free_stack_size() {
if (REDZONE < sizeof(ChunkBase)) return 0;
return (REDZONE) / sizeof(u32);
}
};
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->used_size; }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
void AsanChunkView::GetAllocStack(StackTrace *stack) {
StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(),
chunk_->compressed_alloc_stack_size());
}
void AsanChunkView::GetFreeStack(StackTrace *stack) {
StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(),
chunk_->compressed_free_stack_size());
}
static AsanChunk *PtrToChunk(uptr ptr) {
AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
if (m->chunk_state == CHUNK_MEMALIGN) {
m = (AsanChunk*)((uptr)m - m->used_size);
}
return m;
}
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
CHECK(q->size() > 0);
size_ += q->size();
append_back(q);
q->clear();
}
void AsanChunkFifoList::Push(AsanChunk *n) {
push_back(n);
size_ += n->Size();
}
// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ was evicted from the cache
// a long time ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
CHECK(first_);
AsanChunk *res = front();
size_ -= res->Size();
pop_front();
return res;
}
// All pages we ever allocated.
struct PageGroup {
uptr beg;
uptr end;
uptr size_of_chunk;
uptr last_chunk;
bool InRange(uptr addr) {
return addr >= beg && addr < end;
}
};
class MallocInfo {
public:
explicit MallocInfo(LinkerInitialized x) : mu_(x) { }
AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
AsanChunk *m = 0;
AsanChunk **fl = &free_lists_[size_class];
{
BlockingMutexLock lock(&mu_);
for (uptr i = 0; i < n_chunks; i++) {
if (!(*fl)) {
*fl = GetNewChunks(size_class);
}
AsanChunk *t = *fl;
*fl = t->next;
t->next = m;
CHECK(t->chunk_state == CHUNK_AVAILABLE);
m = t;
}
}
return m;
}
void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
bool eat_free_lists) {
CHECK(flags()->quarantine_size > 0);
BlockingMutexLock lock(&mu_);
AsanChunkFifoList *q = &x->quarantine_;
if (q->size() > 0) {
quarantine_.PushList(q);
while (quarantine_.size() > (uptr)flags()->quarantine_size) {
QuarantinePop();
}
}
if (eat_free_lists) {
for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
size_class++) {
AsanChunk *m = x->free_lists_[size_class];
while (m) {
AsanChunk *t = m->next;
m->next = free_lists_[size_class];
free_lists_[size_class] = m;
m = t;
}
x->free_lists_[size_class] = 0;
}
}
}
void BypassThreadLocalQuarantine(AsanChunk *chunk) {
BlockingMutexLock lock(&mu_);
quarantine_.Push(chunk);
}
AsanChunk *FindChunkByAddr(uptr addr) {
BlockingMutexLock lock(&mu_);
return FindChunkByAddrUnlocked(addr);
}
uptr AllocationSize(uptr ptr) {
if (!ptr) return 0;
BlockingMutexLock lock(&mu_);
// Make sure this is our chunk and |ptr| actually points to the beginning
// of the allocated memory.
AsanChunk *m = FindChunkByAddrUnlocked(ptr);
if (!m || m->Beg() != ptr) return 0;
if (m->chunk_state == CHUNK_ALLOCATED) {
return m->used_size;
} else {
return 0;
}
}
void ForceLock() {
mu_.Lock();
}
void ForceUnlock() {
mu_.Unlock();
}
void PrintStatus() {
BlockingMutexLock lock(&mu_);
uptr malloced = 0;
Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
quarantine_.size() >> 20, malloced >> 20);
for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
AsanChunk *i = free_lists_[j];
if (!i) continue;
uptr t = 0;
for (; i; i = i->next) {
t += i->Size();
}
Printf("%zu:%zu ", j, t >> 20);
}
Printf("\n");
}
PageGroup *FindPageGroup(uptr addr) {
BlockingMutexLock lock(&mu_);
return FindPageGroupUnlocked(addr);
}
private:
PageGroup *FindPageGroupUnlocked(uptr addr) {
int n = atomic_load(&n_page_groups_, memory_order_relaxed);
// If the page groups are not sorted yet, sort them.
if (n_sorted_page_groups_ < n) {
SortArray((uptr*)page_groups_, n);
n_sorted_page_groups_ = n;
}
// Binary search over the page groups.
int beg = 0, end = n;
while (beg < end) {
int med = (beg + end) / 2;
uptr g = (uptr)page_groups_[med];
if (addr > g) {
// 'g' points to the end of the group, so 'addr'
// may not belong to page_groups_[med] or any previous group.
beg = med + 1;
} else {
// 'addr' may belong to page_groups_[med] or a previous group.
end = med;
}
}
if (beg >= n)
return 0;
PageGroup *g = page_groups_[beg];
CHECK(g);
if (g->InRange(addr))
return g;
return 0;
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
AsanChunk *left_chunk, AsanChunk *right_chunk) {
// Prefer an allocated chunk or a chunk from quarantine.
if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
right_chunk->chunk_state != CHUNK_AVAILABLE)
return right_chunk;
if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
left_chunk->chunk_state != CHUNK_AVAILABLE)
return left_chunk;
// Choose based on offset.
sptr l_offset = 0, r_offset = 0;
CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
if (l_offset < r_offset)
return left_chunk;
return right_chunk;
}
AsanChunk *FindChunkByAddrUnlocked(uptr addr) {
PageGroup *g = FindPageGroupUnlocked(addr);
if (!g) return 0;
CHECK(g->size_of_chunk);
uptr offset_from_beg = addr - g->beg;
uptr this_chunk_addr = g->beg +
(offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
CHECK(g->InRange(this_chunk_addr));
AsanChunk *m = (AsanChunk*)this_chunk_addr;
CHECK(m->chunk_state == CHUNK_ALLOCATED ||
m->chunk_state == CHUNK_AVAILABLE ||
m->chunk_state == CHUNK_QUARANTINE);
sptr offset = 0;
AsanChunkView m_view(m);
if (m_view.AddrIsInside(addr, 1, &offset))
return m;
if (m_view.AddrIsAtRight(addr, 1, &offset)) {
if (this_chunk_addr == g->last_chunk) // rightmost chunk
return m;
uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
CHECK(g->InRange(right_chunk_addr));
return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
} else {
CHECK(m_view.AddrIsAtLeft(addr, 1, &offset));
if (this_chunk_addr == g->beg) // leftmost chunk
return m;
uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
CHECK(g->InRange(left_chunk_addr));
return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
}
}
void QuarantinePop() {
CHECK(quarantine_.size() > 0);
AsanChunk *m = quarantine_.Pop();
CHECK(m);
// if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
CHECK(m->alloc_tid >= 0);
CHECK(m->free_tid >= 0);
uptr size_class = m->SizeClass();
m->next = free_lists_[size_class];
free_lists_[size_class] = m;
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
thread_stats.real_frees++;
thread_stats.really_freed += m->used_size;
thread_stats.really_freed_redzones += m->Size() - m->used_size;
thread_stats.really_freed_by_size[m->SizeClass()]++;
}
// Get a list of newly allocated chunks.
AsanChunk *GetNewChunks(u8 size_class) {
uptr size = SizeClassToSize(size_class);
CHECK(IsPowerOfTwo(kMinMmapSize));
CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
uptr mmap_size = Max(size, kMinMmapSize);
uptr n_chunks = mmap_size / size;
CHECK(n_chunks * size == mmap_size);
uptr PageSize = GetPageSizeCached();
if (size < PageSize) {
// Size is small, just poison the last chunk.
n_chunks--;
} else {
// Size is large, allocate an extra page at right and poison it.
mmap_size += PageSize;
}
CHECK(n_chunks > 0);
u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += mmap_size;
thread_stats.mmaped_by_size[size_class] += n_chunks;
AsanChunk *res = 0;
for (uptr i = 0; i < n_chunks; i++) {
AsanChunk *m = (AsanChunk*)(mem + i * size);
m->chunk_state = CHUNK_AVAILABLE;
m->size_class = size_class;
m->next = res;
res = m;
}
PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
// This memory is already poisoned, no need to poison it again.
pg->beg = (uptr)mem;
pg->end = pg->beg + mmap_size;
pg->size_of_chunk = size;
pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
CHECK(idx < (int)ARRAY_SIZE(page_groups_));
page_groups_[idx] = pg;
return res;
}
AsanChunk *free_lists_[kNumberOfSizeClasses];
AsanChunkFifoList quarantine_;
BlockingMutex mu_;
PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
atomic_uint32_t n_page_groups_;
int n_sorted_page_groups_;
};
static MallocInfo malloc_info(LINKER_INITIALIZED);
void AsanThreadLocalMallocStorage::CommitBack() {
malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
AsanChunkView FindHeapChunkByAddress(uptr address) {
return AsanChunkView(malloc_info.FindChunkByAddr(address));
}
static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type) {
__asan_init();
CHECK(stack);
if (size == 0) {
size = 1; // TODO(kcc): do something smarter
}
CHECK(IsPowerOfTwo(alignment));
uptr rounded_size = RoundUpTo(size, REDZONE);
uptr needed_size = rounded_size + REDZONE;
if (alignment > REDZONE) {
needed_size += alignment;
}
CHECK(IsAligned(needed_size, REDZONE));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
(void*)size);
return 0;
}
u8 size_class = SizeToSizeClass(needed_size);
uptr size_to_allocate = SizeClassToSize(size_class);
CHECK(size_to_allocate >= kMinAllocSize);
CHECK(size_to_allocate >= needed_size);
CHECK(IsAligned(size_to_allocate, REDZONE));
if (flags()->verbosity >= 3) {
Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
alignment, size, size_class, size_to_allocate);
}
AsanThread *t = asanThreadRegistry().GetCurrent();
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
// Statistics
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += size_to_allocate - size;
thread_stats.malloced_by_size[size_class]++;
AsanChunk *m = 0;
if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
// get directly from global storage.
m = malloc_info.AllocateChunks(size_class, 1);
thread_stats.malloc_large++;
} else {
// get from the thread-local storage.
AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
if (!*fl) {
uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
*fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
thread_stats.malloc_small_slow++;
}
m = *fl;
*fl = (*fl)->next;
}
CHECK(m);
CHECK(m->chunk_state == CHUNK_AVAILABLE);
m->chunk_state = CHUNK_ALLOCATED;
m->alloc_type = alloc_type;
m->next = 0;
CHECK(m->Size() == size_to_allocate);
uptr addr = (uptr)m + REDZONE;
CHECK(addr <= (uptr)m->compressed_free_stack());
if (alignment > REDZONE && (addr & (alignment - 1))) {
addr = RoundUpTo(addr, alignment);
CHECK((addr & (alignment - 1)) == 0);
AsanChunk *p = (AsanChunk*)(addr - REDZONE);
p->chunk_state = CHUNK_MEMALIGN;
p->used_size = (uptr)p - (uptr)m;
m->alignment_log = Log2(alignment);
CHECK(m->Beg() == addr);
} else {
m->alignment_log = Log2(REDZONE);
}
CHECK(m == PtrToChunk(addr));
m->used_size = size;
CHECK(m->Beg() == addr);
m->alloc_tid = t ? t->tid() : 0;
m->free_tid = kInvalidTid;
StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
m->compressed_alloc_stack_size());
PoisonShadow(addr, rounded_size, 0);
if (size < rounded_size) {
PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
size & (REDZONE - 1));
}
if (size <= (uptr)(flags()->max_malloc_fill_size)) {
REAL(memset)((void*)addr, 0, rounded_size);
}
return (u8*)addr;
}
static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) {
if (!ptr) return;
CHECK(stack);
if (flags()->debug) {
CHECK(malloc_info.FindPageGroup((uptr)ptr));
}
// Printf("Deallocate %p\n", ptr);
AsanChunk *m = PtrToChunk((uptr)ptr);
// Flip the chunk_state atomically to avoid race on double-free.
u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
memory_order_acq_rel);
if (old_chunk_state == CHUNK_QUARANTINE) {
ReportDoubleFree((uptr)ptr, stack);
} else if (old_chunk_state != CHUNK_ALLOCATED) {
ReportFreeNotMalloced((uptr)ptr, stack);
}
CHECK(old_chunk_state == CHUNK_ALLOCATED);
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
ReportAllocTypeMismatch((uptr)ptr, stack,
(AllocType)m->alloc_type, (AllocType)alloc_type);
// With REDZONE==16 m->next is in the user area, otherwise it should be 0.
CHECK(REDZONE <= 16 || !m->next);
CHECK(m->free_tid == kInvalidTid);
CHECK(m->alloc_tid >= 0);
AsanThread *t = asanThreadRegistry().GetCurrent();
m->free_tid = t ? t->tid() : 0;
StackTrace::CompressStack(stack, m->compressed_free_stack(),
m->compressed_free_stack_size());
uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->used_size;
thread_stats.freed_by_size[m->SizeClass()]++;
CHECK(m->chunk_state == CHUNK_QUARANTINE);
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
ms->quarantine_.Push(m);
if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
malloc_info.SwallowThreadLocalMallocStorage(ms, false);
}
} else {
malloc_info.BypassThreadLocalQuarantine(m);
}
}
static u8 *Reallocate(u8 *old_ptr, uptr new_size,
StackTrace *stack) {
CHECK(old_ptr && new_size);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
thread_stats.reallocs++;
thread_stats.realloced += new_size;
AsanChunk *m = PtrToChunk((uptr)old_ptr);
CHECK(m->chunk_state == CHUNK_ALLOCATED);
uptr old_size = m->used_size;
uptr memcpy_size = Min(new_size, old_size);
u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC);
if (new_ptr) {
CHECK(REAL(memcpy) != 0);
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, stack, FROM_MALLOC);
}
return new_ptr;
}
} // namespace __asan
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
#endif
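// On platforms where weak symbols are supported, a client overrides these
// hooks simply by defining strong versions, e.g. (user code, illustrative):
//   extern "C" void __asan_malloc_hook(void *ptr, uptr size) {
//     /* record the allocation somewhere */
//   }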
namespace __asan {
void InitializeAllocator() { }
void PrintInternalAllocatorStats() {
}
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type) {
void *ptr = (void*)Allocate(alignment, size, stack, alloc_type);
ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
ASAN_FREE_HOOK(ptr);
Deallocate((u8*)ptr, stack, alloc_type);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC);
if (ptr)
REAL(memset)(ptr, 0, nmemb * size);
ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
if (p == 0) {
void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
ASAN_MALLOC_HOOK(ptr, size);
return ptr;
} else if (size == 0) {
ASAN_FREE_HOOK(p);
Deallocate((u8*)p, stack, FROM_MALLOC);
return 0;
}
return Reallocate((u8*)p, size, stack);
}
void *asan_valloc(uptr size, StackTrace *stack) {
void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC);
ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC);
ASAN_MALLOC_HOOK(ptr, size);
return ptr;
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
void *ptr = Allocate(alignment, size, stack, FROM_MALLOC);
CHECK(IsAligned((uptr)ptr, alignment));
ASAN_MALLOC_HOOK(ptr, size);
*memptr = ptr;
return 0;
}
uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
CHECK(stack);
if (ptr == 0) return 0;
uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
}
return usable_size;
}
uptr asan_mz_size(const void *ptr) {
return malloc_info.AllocationSize((uptr)ptr);
}
void asan_mz_force_lock() {
malloc_info.ForceLock();
}
void asan_mz_force_unlock() {
malloc_info.ForceUnlock();
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
if (size == 0) return 1;
return Min(size, kMaxAllowedMallocSize);
}
bool __asan_get_ownership(const void *p) {
return malloc_info.AllocationSize((uptr)p) > 0;
}
uptr __asan_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr allocated_size = malloc_info.AllocationSize((uptr)p);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
}
return allocated_size;
}
#endif // ASAN_ALLOCATOR_VERSION

libsanitizer/asan/asan_allocator.h

@@ -7,7 +7,7 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator.cc.
// ASan-private header for asan_allocator2.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
@@ -17,18 +17,6 @@
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_list.h"
// We are in the process of transitioning from the old allocator (version 1)
// to a new one (version 2). The change is quite intrusive so both allocators
// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
# if (ASAN_LINUX && !ASAN_ANDROID) || ASAN_MAC || ASAN_WINDOWS
# define ASAN_ALLOCATOR_VERSION 2
# else
# define ASAN_ALLOCATOR_VERSION 1
# endif
#endif // ASAN_ALLOCATOR_VERSION
namespace __asan {
enum AllocType {
@@ -101,109 +89,17 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
struct AsanThreadLocalMallocStorage {
explicit AsanThreadLocalMallocStorage(LinkerInitialized x)
#if ASAN_ALLOCATOR_VERSION == 1
: quarantine_(x)
#endif
{ }
AsanThreadLocalMallocStorage() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage));
}
#if ASAN_ALLOCATOR_VERSION == 1
AsanChunkFifoList quarantine_;
AsanChunk *free_lists_[kNumberOfSizeClasses];
#else
uptr quarantine_cache[16];
uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque.
#endif
void CommitBack();
};
// Fake stack frame contains local variables of one function.
// This struct should fit into a stack redzone (32 bytes).
struct FakeFrame {
uptr magic; // Modified by the instrumented code.
uptr descr; // Modified by the instrumented code.
FakeFrame *next;
u64 real_stack : 48;
u64 size_minus_one : 16;
};
struct FakeFrameFifo {
public:
void FifoPush(FakeFrame *node);
FakeFrame *FifoPop();
private:
FakeFrame *first_, *last_;
};
class FakeFrameLifo {
public:
void LifoPush(FakeFrame *node) {
node->next = top_;
top_ = node;
}
void LifoPop() {
CHECK(top_);
top_ = top_->next;
}
FakeFrame *top() { return top_; }
private:
FakeFrame *top_;
};
// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so that when a function exits the fake stack
// is not popped but remains there for quite some time until it gets used again.
// We poison the objects on the fake stack when the function returns.
// This helps us find use-after-return bugs.
// We cannot rely on __asan_stack_free being called on every function exit,
// so we maintain a LIFO list of all current fake frames and update it on every
// call to __asan_stack_malloc.
class FakeStack {
public:
FakeStack();
explicit FakeStack(LinkerInitialized) {}
void Init(uptr stack_size);
void StopUsingFakeStack() { alive_ = false; }
void Cleanup();
uptr AllocateStack(uptr size, uptr real_stack);
static void OnFree(uptr ptr, uptr size, uptr real_stack);
// Return the bottom of the mapped region.
uptr AddrIsInFakeStack(uptr addr);
uptr StackSize() { return stack_size_; }
private:
static const uptr kMinStackFrameSizeLog = 9; // Min frame is 512B.
static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
static const uptr kNumberOfSizeClasses =
kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
bool AddrIsInSizeClass(uptr addr, uptr size_class);
// Each size class should be large enough to hold all frames.
uptr ClassMmapSize(uptr size_class);
uptr ClassSize(uptr size_class) {
return 1UL << (size_class + kMinStackFrameSizeLog);
}
void DeallocateFrame(FakeFrame *fake_frame);
uptr ComputeSizeClass(uptr alloc_size);
void AllocateOneSizeClass(uptr size_class);
uptr stack_size_;
bool alive_;
uptr allocated_size_classes_[kNumberOfSizeClasses];
FakeFrameFifo size_classes_[kNumberOfSizeClasses];
FakeFrameLifo call_stack_;
};
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);

libsanitizer/asan/asan_allocator2.cc

@@ -11,20 +11,20 @@
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
// Status: under development, not enabled by default yet.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#if ASAN_ALLOCATOR_VERSION == 2
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"
namespace __asan {
@@ -32,7 +32,7 @@ struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
@@ -47,7 +47,7 @@ struct AsanMapUnmapCallback {
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
@@ -56,18 +56,23 @@ struct AsanMapUnmapCallback {
#if SANITIZER_WORDSIZE == 64
#if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
#else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
#endif
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
#elif SANITIZER_WORDSIZE == 32
static const u64 kAddressSpaceSize = 1ULL << 32;
typedef CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
SizeClassMap, kRegionSizeLog,
FlatByteMap<kFlatByteMapSize>,
AsanMapUnmapCallback> PrimaryAllocator;
#endif
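// On 32-bit targets the primary allocator thus manages the address space in
// 1 MiB regions (kRegionSizeLog == 20) tracked by a flat byte map of
// kAddressSpaceSize >> kRegionSizeLog == 4096 entries, one byte per region.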
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
@@ -139,14 +144,15 @@ static uptr ComputeRZLog(uptr user_requested_size) {
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
// If a memory chunk is allocated by memalign and we had to increase the
// allocation size to achieve the proper alignment, then we store this magic
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B ? ? ? L L L L L L H H U U U U U U
// M -- magic value kMemalignMagic
// M B L L L L L L L L L H H U U U U U U
//   |                   ^
//   --------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kMemalignMagic = 0xCC6E96B9;
static const uptr kAllocBegMagic = 0xCC6E96B9;
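// A lookup can then recover the chunk header from the block start returned
// by the underlying allocator (sketch):
//   uptr *p = (uptr *)alloc_beg;
//   AsanChunk *m = (p[0] == kAllocBegMagic) ? (AsanChunk *)p[1]
//                                           : (AsanChunk *)alloc_beg;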
struct ChunkHeader {
// 1-st 8 bytes.
@@ -157,6 +163,7 @@ struct ChunkHeader {
u32 from_memalign : 1;
u32 alloc_type : 2;
u32 rz_log : 3;
u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
@@ -167,7 +174,6 @@ struct ChunkHeader {
struct ChunkBase : ChunkHeader {
// Header2, intersects with user memory.
AsanChunk *next;
u32 free_context_id;
};
@@ -188,7 +194,8 @@ struct AsanChunk: ChunkBase {
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
// We store the alloc/free stack traces in the chunk itself.
// If we don't use stack depot, we store the alloc/free stack traces
// in the chunk itself.
u32 *AllocStackBeg() {
return (u32*)(Beg() - RZLog2Size(rz_log));
}
@@ -204,6 +211,9 @@ struct AsanChunk: ChunkBase {
uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
return (available - kChunkHeader2Size) / sizeof(u32);
}
bool AddrIsInside(uptr addr) {
return (addr >= Beg()) && (addr < Beg() + UsedSize());
}
};
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
@@ -257,22 +267,25 @@ struct QuarantineCallback {
}
void Recycle(AsanChunk *m) {
CHECK(m->chunk_state == CHUNK_QUARANTINE);
m->chunk_state = CHUNK_AVAILABLE;
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
CHECK_NE(m->alloc_tid, kInvalidTid);
CHECK_NE(m->free_tid, kInvalidTid);
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapLeftRedzoneMagic);
void *p = reinterpret_cast<void *>(m->AllocBeg());
if (m->from_memalign) {
uptr *memalign_magic = reinterpret_cast<uptr *>(p);
CHECK_EQ(memalign_magic[0], kMemalignMagic);
CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
if (p != m) {
uptr *alloc_magic = reinterpret_cast<uptr *>(p);
CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of the deallocated chunk, confusing the GetAsanChunk lookup.
alloc_magic[0] = 0;
CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
}
// Statistics.
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
@@ -296,9 +309,10 @@ void InitializeAllocator() {
}
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
AllocType alloc_type) {
AllocType alloc_type, bool can_fill) {
if (!asan_inited)
__asan_init();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (alignment < min_alignment)
@@ -314,9 +328,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
CHECK(IsPowerOfTwo(alignment));
uptr rz_log = ComputeRZLog(size);
uptr rz_size = RZLog2Size(rz_log);
uptr rounded_size = RoundUpTo(size, alignment);
if (rounded_size < kChunkHeader2Size)
rounded_size = kChunkHeader2Size;
uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
@@ -331,10 +343,10 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
(void*)size);
return 0;
return AllocatorReturnNull();
}
AsanThread *t = asanThreadRegistry().GetCurrent();
AsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
@@ -345,8 +357,6 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
// Clear the first allocated word (an old kMemalignMagic may still be there).
reinterpret_cast<uptr *>(alloc_beg)[0] = 0;
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
@@ -356,7 +366,6 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
CHECK_LE(user_end, alloc_end);
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->chunk_state = CHUNK_ALLOCATED;
m->alloc_type = alloc_type;
m->rz_log = rz_log;
u32 alloc_tid = t ? t->tid() : 0;
@@ -364,11 +373,10 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
m->free_tid = kInvalidTid;
m->from_memalign = user_beg != beg_plus_redzone;
if (m->from_memalign) {
CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
memalign_magic[0] = kMemalignMagic;
memalign_magic[1] = chunk_beg;
if (alloc_beg != chunk_beg) {
CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
}
if (using_primary_allocator) {
CHECK(size);
@@ -382,7 +390,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
meta[1] = chunk_beg;
}
if (flags()->use_stack_depot) {
if (fl.use_stack_depot) {
m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
} else {
m->alloc_context_id = 0;
@@ -394,12 +402,12 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
// Deal with the end of the region if size is not aligned to granularity.
if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
if (size != size_rounded_down_to_granularity && fl.poison_heap) {
u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
*shadow = size & (SHADOW_GRANULARITY - 1);
}
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += needed_size - size;
@@ -409,26 +417,43 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
thread_stats.malloc_large++;
void *res = reinterpret_cast<void *>(user_beg);
if (can_fill && fl.max_malloc_fill_size) {
uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
REAL(memset)(res, fl.malloc_fill_byte, fill_size);
}
#if CAN_SANITIZE_LEAKS
m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
ASAN_MALLOC_HOOK(res, size);
return res;
}
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
ASAN_FREE_HOOK(ptr);
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
// Flip the chunk_state atomically to avoid race on double-free.
u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
memory_order_relaxed);
if (old_chunk_state == CHUNK_QUARANTINE)
static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else if (old_chunk_state != CHUNK_ALLOCATED)
else
ReportFreeNotMalloced((uptr)ptr, stack);
CHECK(old_chunk_state == CHUNK_ALLOCATED);
}
static void AtomicallySetQuarantineFlag(AsanChunk *m,
void *ptr, StackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
CHUNK_QUARANTINE, memory_order_acquire))
ReportInvalidFree(ptr, old_chunk_state, stack);
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}
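The release store at the end of Allocate() and the acquire compare-exchange here form a pair: a thread that wins the CAS is guaranteed to see all metadata written before the chunk was published. A minimal standalone sketch of the same handshake, using std::atomic instead of the sanitizer's own primitives (state values are illustrative):

#include <atomic>
#include <cstdint>

enum : uint8_t { kAllocated = 2, kQuarantine = 3 };

// Publish: mark the chunk live only after all metadata is written.
void Publish(std::atomic<uint8_t> *state) {
  state->store(kAllocated, std::memory_order_release);
}

// Free: exactly one caller can flip kAllocated -> kQuarantine; a second
// (double) free observes the stale state and must report instead.
bool TryQuarantine(std::atomic<uint8_t> *state) {
  uint8_t expected = kAllocated;
  return state->compare_exchange_strong(expected, kQuarantine,
                                        std::memory_order_acquire);
}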
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr,
StackTrace *stack, AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
ReportAllocTypeMismatch((uptr)ptr, stack,
(AllocType)m->alloc_type, (AllocType)alloc_type);
@ -436,7 +461,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = asanThreadRegistry().GetCurrent();
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
if (flags()->use_stack_depot) {
m->free_context_id = StackDepotPut(stack->trace, stack->size);
@ -444,13 +469,12 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
m->free_context_id = 0;
StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
}
CHECK(m->chunk_state == CHUNK_QUARANTINE);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->UsedSize();
@ -468,57 +492,67 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
}
}
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
QuarantineChunk(m, ptr, stack, alloc_type);
}
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
CHECK(old_ptr && new_size);
uptr p = reinterpret_cast<uptr>(old_ptr);
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.reallocs++;
thread_stats.realloced += new_size;
CHECK(m->chunk_state == CHUNK_ALLOCATED);
uptr old_size = m->UsedSize();
uptr memcpy_size = Min(new_size, old_size);
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
CHECK(REAL(memcpy) != 0);
u8 chunk_state = m->chunk_state;
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), (void*)0);
uptr memcpy_size = Min(new_size, m->UsedSize());
// If realloc() races with free(), we may start copying freed memory.
// However, we will report racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, stack, FROM_MALLOC);
}
return new_ptr;
}
static AsanChunk *GetAsanChunkByAddr(uptr p) {
void *ptr = reinterpret_cast<void *>(p);
uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return 0;
uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
if (memalign_magic[0] == kMemalignMagic) {
AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
CHECK(m->from_memalign);
return m;
}
if (!allocator.FromPrimary(ptr)) {
uptr *meta = reinterpret_cast<uptr *>(
allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
if (!allocator.FromPrimary(alloc_beg)) {
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
return m;
}
uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
CHECK_LE(actual_size, SizeClassMap::kMaxSize);
// We know the actually allocated size, but we don't know the redzone size.
// Just try all possible redzone sizes.
for (u32 rz_log = 0; rz_log < 8; rz_log++) {
u32 rz_size = RZLog2Size(rz_log);
uptr max_possible_size = actual_size - rz_size;
if (ComputeRZLog(max_possible_size) != rz_log)
continue;
return reinterpret_cast<AsanChunk *>(
alloc_beg + rz_size - kChunkHeaderSize);
}
return 0;
uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
if (alloc_magic[0] == kAllocBegMagic)
return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
return reinterpret_cast<AsanChunk *>(alloc_beg);
}
static AsanChunk *GetAsanChunkByAddr(uptr p) {
void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
}
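GetAsanChunk() has to undo the padding that alignment may have introduced: when the user pointer was shifted right inside the mmap-ed block, the first two words of the block hold a magic tag plus the real header address. A hedged sketch of just that recovery step (the kAllocBegMagic value is assumed to match the constant defined earlier in this file):

#include <cstdint>
using uptr = uintptr_t;

static const uptr kAllocBegMagic = 0xCC6E96B9;  // assumed tag value

// Sketch: recover the chunk header from the block beginning. For ordinary
// allocations the header starts at alloc_beg; for left-padded (memalign)
// allocations its address was stashed behind the magic tag.
uptr RecoverChunkBeg(uptr alloc_beg) {
  uptr *words = reinterpret_cast<uptr *>(alloc_beg);
  if (words[0] == kAllocBegMagic)
    return words[1];
  return alloc_beg;
}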
// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
void *alloc_beg =
allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
}
static uptr AllocationSize(uptr p) {
@ -583,33 +617,33 @@ void PrintInternalAllocatorStats() {
allocator.PrintStats();
}
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type) {
return Allocate(size, alignment, stack, alloc_type);
return Allocate(size, alignment, stack, alloc_type, true);
}
SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
Deallocate(ptr, stack, alloc_type);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
return Allocate(size, 8, stack, FROM_MALLOC);
return Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
if (ptr)
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return AllocatorReturnNull();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator, there is no need to
// clear it as it comes directly from mmap.
if (ptr && allocator.FromPrimary(ptr))
REAL(memset)(ptr, 0, nmemb * size);
return ptr;
}
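CallocShouldReturnNullDueToOverflow() guards the nmemb * size multiplication above; a minimal sketch of the check it presumably performs (this is not the sanitizer_common implementation, just the standard idiom):

#include <cstdint>
using uptr = uintptr_t;

// Returns true when nmemb * size would wrap around uptr.
bool CallocWouldOverflow(uptr nmemb, uptr size) {
  if (nmemb == 0 || size == 0) return false;
  return nmemb > (uptr)-1 / size;  // i.e. nmemb * size > UINTPTR_MAX
}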
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
if (p == 0)
return Allocate(size, 8, stack, FROM_MALLOC);
return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
Deallocate(p, stack, FROM_MALLOC);
return 0;
@ -618,7 +652,7 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
}
void *asan_valloc(uptr size, StackTrace *stack) {
return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
@ -628,12 +662,12 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return Allocate(size, PageSize, stack, FROM_MALLOC);
return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
@ -664,6 +698,86 @@ void asan_mz_force_unlock() {
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
__asan::allocator.ForceLock();
}
void UnlockAllocator() {
__asan::allocator.ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&__asan::allocator;
*end = *begin + sizeof(__asan::allocator);
}
uptr PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m =
__asan::GetAsanChunkByAddrFastLocked(chunk);
CHECK(m);
return m->Beg();
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->chunk_state == __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return static_cast<ChunkTag>(m->lsan_tag);
}
void LsanMetadata::set_tag(ChunkTag value) {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
m->lsan_tag = value;
}
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->UsedSize();
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->alloc_context_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__asan::allocator.ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
if (!m) return kIgnoreObjectInvalid;
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
if (m->lsan_tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess;
} else {
return kIgnoreObjectInvalid;
}
}
} // namespace __lsan
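These hooks are what LSan drives during a leak scan. An illustrative caller-side use of the callback interface, assuming the ForEachChunkCallback signature is void(uptr chunk, void *arg) as its use above suggests, and that the allocator is locked as required:

// Count chunks still marked allocated; LsanMetadata decodes the header.
static void CountAllocated(uptr chunk, void *arg) {
  chunk = __lsan::GetUserBegin(chunk);  // normalize to the user pointer
  __lsan::LsanMetadata m(chunk);
  if (m.allocated())
    ++*reinterpret_cast<uptr *>(arg);
}

// uptr n = 0;
// __lsan::ForEachChunk(CountAllocated, &n);  // allocator must be locked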
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
@ -693,17 +807,14 @@ uptr __asan_get_allocated_size(const void *p) {
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __asan_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
#endif
#endif // ASAN_ALLOCATOR_VERSION

View File

@ -0,0 +1,194 @@
//===-- asan_dll_thunk.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file defines a family of thunks that should be statically linked into
// the DLLs that have ASan instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
// details.
//===----------------------------------------------------------------------===//
// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
// ----------------- Helper functions and macros --------------------- {{{1
extern "C" {
void *__stdcall GetModuleHandleA(const char *module_name);
void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort();
}
static void *getRealProcAddressOrDie(const char *name) {
void *ret = GetProcAddress(GetModuleHandleA(0), name);
if (!ret)
abort();
return ret;
}
#define WRAP_V_V(name) \
extern "C" void name() { \
typedef void (*fntype)(); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(); \
}
#define WRAP_V_W(name) \
extern "C" void name(void *arg) { \
typedef void (*fntype)(void *arg); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg); \
}
#define WRAP_V_WW(name) \
extern "C" void name(void *arg1, void *arg2) { \
typedef void (*fntype)(void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2); \
}
#define WRAP_V_WWW(name) \
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
typedef void (*fntype)(void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2, arg3); \
}
#define WRAP_W_V(name) \
extern "C" void *name() { \
typedef void *(*fntype)(); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(); \
}
#define WRAP_W_W(name) \
extern "C" void *name(void *arg) { \
typedef void *(*fntype)(void *arg); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg); \
}
#define WRAP_W_WW(name) \
extern "C" void *name(void *arg1, void *arg2) { \
typedef void *(*fntype)(void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2); \
}
#define WRAP_W_WWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3); \
}
#define WRAP_W_WWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
typedef void *(*fntype)(void *, void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4); \
}
#define WRAP_W_WWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5); \
}
#define WRAP_W_WWWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5, void *arg6) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
}
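Concretely, WRAP_W_W(malloc) used further below expands to roughly the following; the function-local static means GetProcAddress runs once per wrapped function, on its first call:

extern "C" void *malloc(void *arg) {
  typedef void *(*fntype)(void *arg);
  // Resolved lazily from the main binary, then cached for later calls.
  static fntype fn = (fntype)getRealProcAddressOrDie("malloc");
  return fn(arg);
}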
// }}}
// ----------------- ASan own interface functions --------------------
WRAP_W_V(__asan_should_detect_stack_use_after_return)
extern "C" {
int __asan_option_detect_stack_use_after_return;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
void __asan_init_v3() {
typedef void (*fntype)();
static fntype fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
}
}
WRAP_V_W(__asan_report_store1)
WRAP_V_W(__asan_report_store2)
WRAP_V_W(__asan_report_store4)
WRAP_V_W(__asan_report_store8)
WRAP_V_W(__asan_report_store16)
WRAP_V_WW(__asan_report_store_n)
WRAP_V_W(__asan_report_load1)
WRAP_V_W(__asan_report_load2)
WRAP_V_W(__asan_report_load4)
WRAP_V_W(__asan_report_load8)
WRAP_V_W(__asan_report_load16)
WRAP_V_WW(__asan_report_load_n)
WRAP_V_WW(__asan_register_globals)
WRAP_V_WW(__asan_unregister_globals)
WRAP_W_WW(__asan_stack_malloc_0)
WRAP_W_WW(__asan_stack_malloc_1)
WRAP_W_WW(__asan_stack_malloc_2)
WRAP_W_WW(__asan_stack_malloc_3)
WRAP_W_WW(__asan_stack_malloc_4)
WRAP_W_WW(__asan_stack_malloc_5)
WRAP_W_WW(__asan_stack_malloc_6)
WRAP_W_WW(__asan_stack_malloc_7)
WRAP_W_WW(__asan_stack_malloc_8)
WRAP_W_WW(__asan_stack_malloc_9)
WRAP_W_WW(__asan_stack_malloc_10)
WRAP_V_WWW(__asan_stack_free_0)
WRAP_V_WWW(__asan_stack_free_1)
WRAP_V_WWW(__asan_stack_free_2)
WRAP_V_WWW(__asan_stack_free_3)
WRAP_V_WWW(__asan_stack_free_4)
WRAP_V_WWW(__asan_stack_free_5)
WRAP_V_WWW(__asan_stack_free_6)
WRAP_V_WWW(__asan_stack_free_7)
WRAP_V_WWW(__asan_stack_free_8)
WRAP_V_WWW(__asan_stack_free_9)
WRAP_V_WWW(__asan_stack_free_10)
// TODO(timurrrr): Add more interface functions on an as-needed basis.
// ----------------- Memory allocation functions ---------------------
WRAP_V_W(free)
WRAP_V_WW(_free_dbg)
WRAP_W_W(malloc)
WRAP_W_WWWW(_malloc_dbg)
WRAP_W_WW(calloc)
WRAP_W_WWWWW(_calloc_dbg)
WRAP_W_WWW(_calloc_impl)
WRAP_W_WW(realloc)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_W(_msize)
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
#endif // ASAN_DLL_THUNK

View File

@ -10,170 +10,195 @@
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
namespace __asan {
FakeStack::FakeStack() {
CHECK(REAL(memset) != 0);
REAL(memset)(this, 0, sizeof(*this));
}
static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
uptr mem = allocated_size_classes_[size_class];
uptr size = ClassMmapSize(size_class);
bool res = mem && addr >= mem && addr < mem + size;
return res;
}
uptr FakeStack::AddrIsInFakeStack(uptr addr) {
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
}
return 0;
}
// We may want to compute this during compilation.
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
uptr log = Log2(rounded_size);
CHECK(alloc_size <= (1UL << log));
if (!(alloc_size > (1UL << (log-1)))) {
Printf("alloc_size %zu log %zu\n", alloc_size, log);
}
CHECK(alloc_size > (1UL << (log-1)));
uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
CHECK(res < kNumberOfSizeClasses);
CHECK(ClassSize(res) >= rounded_size);
return res;
}
void FakeFrameFifo::FifoPush(FakeFrame *node) {
CHECK(node);
node->next = 0;
if (first_ == 0 && last_ == 0) {
first_ = last_ = node;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
for (uptr i = 0; i < (1U << class_id); i++)
shadow[i] = magic;
} else {
CHECK(first_);
CHECK(last_);
last_->next = node;
last_ = node;
// The size class is too big; it's cheaper to poison only size bytes.
PoisonShadow(ptr, size, static_cast<u8>(magic));
}
}
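The cut-off at class_id 6 follows from the shadow arithmetic: with SHADOW_SCALE == 3, a frame of (64 << class_id) bytes maps to (8 << class_id) shadow bytes, i.e. (1 << class_id) u64 stores. A tiny standalone check of those numbers:

#include <cstdio>

int main() {
  for (unsigned class_id = 0; class_id <= 6; class_id++)
    std::printf("class %u: %5u-byte frame -> %2u u64 shadow stores\n",
                class_id, 64u << class_id, 1u << class_id);
  return 0;
}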
FakeFrame *FakeFrameFifo::FifoPop() {
CHECK(first_ && last_ && "Exhausted fake stack");
FakeFrame *res = 0;
if (first_ == last_) {
res = first_;
first_ = last_ = 0;
} else {
res = first_;
first_ = first_->next;
FakeStack *FakeStack::Create(uptr stack_size_log) {
static uptr kMinStackSizeLog = 16;
static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
if (stack_size_log < kMinStackSizeLog)
stack_size_log = kMinStackSizeLog;
if (stack_size_log > kMaxStackSizeLog)
stack_size_log = kMaxStackSizeLog;
FakeStack *res = reinterpret_cast<FakeStack *>(
MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
res->stack_size_log_ = stack_size_log;
if (flags()->verbosity) {
u8 *p = reinterpret_cast<u8 *>(res);
Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd \n",
GetCurrentTidOrInvalid(), p,
p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
}
return res;
}
void FakeStack::Init(uptr stack_size) {
stack_size_ = stack_size;
alive_ = true;
void FakeStack::Destroy() {
PoisonAll(0);
UnmapOrDie(this, RequiredSize(stack_size_log_));
}
void FakeStack::Cleanup() {
alive_ = false;
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
uptr mem = allocated_size_classes_[i];
if (mem) {
PoisonShadow(mem, ClassMmapSize(i), 0);
allocated_size_classes_[i] = 0;
UnmapOrDie((void*)mem, ClassMmapSize(i));
void FakeStack::PoisonAll(u8 magic) {
PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
magic);
}
ALWAYS_INLINE USED
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses);
if (needs_gc_)
GC(real_stack);
uptr &hint_position = hint_position_[class_id];
const int num_iter = NumberOfFrames(stack_size_log, class_id);
u8 *flags = GetFlags(stack_size_log, class_id);
for (int i = 0; i < num_iter; i++) {
uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
// This part is tricky. On one hand, checking and setting flags[pos]
// should be atomic to ensure async-signal safety. But on the other hand,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;
FakeFrame *res = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log, class_id, pos));
res->real_stack = real_stack;
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res;
}
return 0; // We are out of fake stack.
}
uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
uptr stack_size_log = this->stack_size_log();
uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
if (ptr < beg || ptr >= end) return 0;
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
return base + pos * BytesInSizeClass(class_id);
}
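The function rounds an arbitrary interior pointer down to its frame's first byte purely by shifting. A standalone sketch of the same math (kMinStackFrameSizeLog == 6 as in asan_fake_stack.h; the base of the frames area is taken as a parameter):

#include <cstdint>
using uptr = uintptr_t;

// Map an address inside the frames area back to the containing frame begin.
// Each size class occupies (1 << stack_size_log) bytes; frames in class c
// are (64 << c) bytes each.
uptr FrameBegin(uptr frames_beg, uptr ptr, uptr stack_size_log) {
  const uptr kMinLog = 6;
  uptr class_id = (ptr - frames_beg) >> stack_size_log;   // which class region
  uptr base = frames_beg + (class_id << stack_size_log);  // region begin
  uptr pos = (ptr - base) >> (kMinLog + class_id);        // frame index
  return base + pos * (1UL << (kMinLog + class_id));
}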
void FakeStack::HandleNoReturn() {
needs_gc_ = true;
}
// When a throw, longjmp, or some such event happens we don't call OnFree(),
// and as a result may leak one or more fake frames; the good news is that
// we are notified about all such events by HandleNoReturn().
// If we recently had such a no-return event we need to collect garbage frames.
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
uptr collected = 0;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
u8 *flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
i++) {
if (flags[i] == 0) continue; // not allocated.
FakeFrame *ff = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log(), class_id, i));
if (ff->real_stack < real_stack) {
flags[i] = 0;
collected++;
}
}
}
needs_gc_ = false;
}
uptr FakeStack::ClassMmapSize(uptr size_class) {
return RoundUpToPowerOfTwo(stack_size_);
#if SANITIZER_LINUX && !SANITIZER_ANDROID
static THREADLOCAL FakeStack *fake_stack_tls;
FakeStack *GetTLSFakeStack() {
return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread();
if (!t) return 0;
return t->fake_stack();
}
void FakeStack::AllocateOneSizeClass(uptr size_class) {
CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
uptr new_mem = (uptr)MmapOrDie(
ClassMmapSize(size_class), __FUNCTION__);
// Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
// asanThreadRegistry().GetCurrent()->tid(),
// size_class, new_mem, new_mem + ClassMmapSize(size_class),
// ClassMmapSize(size_class));
uptr i;
for (i = 0; i < ClassMmapSize(size_class);
i += ClassSize(size_class)) {
size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
}
CHECK(i == ClassMmapSize(size_class));
allocated_size_classes_[size_class] = new_mem;
static FakeStack *GetFakeStackFast() {
if (FakeStack *fs = GetTLSFakeStack())
return fs;
if (!__asan_option_detect_stack_use_after_return)
return 0;
return GetFakeStack();
}
uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
if (!alive_) return real_stack;
CHECK(size <= kMaxStackMallocSize && size > 1);
uptr size_class = ComputeSizeClass(size);
if (!allocated_size_classes_[size_class]) {
AllocateOneSizeClass(size_class);
}
FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
CHECK(fake_frame);
fake_frame->size_minus_one = size - 1;
fake_frame->real_stack = real_stack;
while (FakeFrame *top = call_stack_.top()) {
if (top->real_stack > real_stack) break;
call_stack_.LifoPop();
DeallocateFrame(top);
}
call_stack_.LifoPush(fake_frame);
uptr ptr = (uptr)fake_frame;
PoisonShadow(ptr, size, 0);
ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
FakeStack *fs = GetFakeStackFast();
if (!fs) return real_stack;
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
if (!ff)
return real_stack; // Out of fake stack, return the real one.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
}
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
CHECK(alive_);
uptr size = fake_frame->size_minus_one + 1;
uptr size_class = ComputeSizeClass(size);
CHECK(allocated_size_classes_[size_class]);
uptr ptr = (uptr)fake_frame;
CHECK(AddrIsInSizeClass(ptr, size_class));
CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
size_classes_[size_class].FifoPush(fake_frame);
}
void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
FakeFrame *fake_frame = (FakeFrame*)ptr;
CHECK(fake_frame->magic == kRetiredStackFrameMagic);
CHECK(fake_frame->descr != 0);
CHECK(fake_frame->size_minus_one == size - 1);
PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
if (ptr == real_stack)
return;
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
uptr __asan_stack_malloc(uptr size, uptr real_stack) {
if (!flags()->use_fake_stack) return real_stack;
AsanThread *t = asanThreadRegistry().GetCurrent();
if (!t) {
// TSD is gone, use the real stack.
return real_stack;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
return __asan::OnMalloc(class_id, size, real_stack); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \
__asan::OnFree(ptr, class_id, size, real_stack); \
}
uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
// Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
return ptr;
}
void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
if (!flags()->use_fake_stack) return;
if (ptr != real_stack) {
FakeStack::OnFree(ptr, size, real_stack);
}
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
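A hedged sketch of how compiler-instrumented code is expected to use these entry points (actual codegen lives in the compiler; the class-1 frame size of 128 bytes and the fallback local buffer are illustrative):

#include <cstdint>
using uptr = uintptr_t;

extern "C" uptr __asan_stack_malloc_1(uptr size, uptr real_stack);
extern "C" void __asan_stack_free_1(uptr ptr, uptr size, uptr real_stack);

void instrumented_function() {
  char real_frame[128];  // fallback frame on the real stack
  uptr real_stack = reinterpret_cast<uptr>(&real_frame);
  // Returns a fake frame, or real_stack when fake stacks are off/exhausted.
  uptr frame = __asan_stack_malloc_1(128, real_stack);
  // ... locals for this function live at `frame` ...
  __asan_stack_free_1(frame, 128, real_stack);  // no-op if frame == real_stack
}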

View File

@ -0,0 +1,167 @@
//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_fake_stack.cc, implements FakeStack.
//===----------------------------------------------------------------------===//
#ifndef ASAN_FAKE_STACK_H
#define ASAN_FAKE_STACK_H
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
// Fake stack frame contains local variables of one function.
struct FakeFrame {
uptr magic; // Modified by the instrumented code.
uptr descr; // Modified by the instrumented code.
uptr pc; // Modified by the instrumented code.
uptr real_stack;
};
// For each thread we create a fake stack and place stack objects on this fake
// stack instead of the real stack. The fake stack is not really a stack but
// a fast malloc-like allocator, so that when a function exits its fake frame
// is not popped but remains there for quite some time until it gets reused.
// So, we poison the objects on the fake stack when the function returns.
// It helps us find use-after-return bugs.
//
// The FakeStack object is allocated by a single mmap call and has no other
// pointers. The size of the fake stack depends on the actual thread stack size
// and thus cannot be a constant.
// stack_size is a power of two greater than or equal to the thread's stack
// size; we store it as its logarithm (stack_size_log).
// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
// is a power of two, starting from 64 bytes. Each size class occupies
// stack_size bytes and thus can allocate
// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
// For each size class we have NumberOfFrames allocation flags,
// each flag indicates whether the given frame is currently allocated.
// All flags for size classes 0 .. 10 are stored in a single contiguous region
// followed by another contiguous region which contains the actual memory for
// size classes. The addresses are computed by GetFlags and GetFrame without
// any memory accesses solely based on 'this' and stack_size_log.
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round-robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
public:
static const uptr kNumberOfSizeClasses =
kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
// CTOR: create the FakeStack as a single mmap-ed object.
static FakeStack *Create(uptr stack_size_log);
void Destroy();
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
// Each size class occupies stack_size bytes.
static uptr SizeRequiredForFrames(uptr stack_size_log) {
return (1ULL << stack_size_log) * kNumberOfSizeClasses;
}
// Number of bytes required for the whole object.
static uptr RequiredSize(uptr stack_size_log) {
return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
SizeRequiredForFrames(stack_size_log);
}
// Offset of the given flag from the first flag.
// The flags for class 0 begin at offset 000000000
// The flags for class 1 begin at offset 100000000
// ....................2................ 110000000
// ....................3................ 111000000
// and so on.
static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
uptr t = kNumberOfSizeClasses - 1 - class_id;
const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
return ((all_ones >> t) << t) << (stack_size_log - 15);
}
static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
// Reduce n modulo the number of frames in the size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
// Get the pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
}
// Get frame by class_id and pos.
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
(1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
}
// Allocate the fake frame.
FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);
// Deallocate the fake frame: read the saved flag address and write 0 there.
static void Deallocate(uptr x, uptr class_id) {
**SavedFlagPtr(x, class_id) = 0;
}
// Poison the entire FakeStack's shadow with the magic value.
void PoisonAll(u8 magic);
// Return the beginning of the FakeFrame or 0 if the address is not ours.
uptr AddrIsInFakeStack(uptr addr);
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
return 1UL << (class_id + kMinStackFrameSizeLog);
}
// The fake frame is guaranteed to have a right redzone.
// We use the last word of that redzone to store the address of the flag
// that corresponds to the current frame, to make deallocation faster.
static u8 **SavedFlagPtr(uptr x, uptr class_id) {
return reinterpret_cast<u8 **>(x + BytesInSizeClass(class_id) - sizeof(x));
}
uptr stack_size_log() const { return stack_size_log_; }
void HandleNoReturn();
void GC(uptr real_stack);
private:
FakeStack() { }
static const uptr kFlagsOffset = 4096; // This is where the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
// Set by HandleNoReturn() when fake frames may have leaked; checked by
// Allocate(), which then runs GC().
bool needs_gc_;
};
FakeStack *GetTLSFakeStack();
void SetTLSFakeStack(FakeStack *fs);
} // namespace __asan
#endif // ASAN_FAKE_STACK_H
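Plugging stack_size_log == 16 into the formulas above gives concrete numbers; a quick self-contained check (constants copied from this header):

#include <cstdio>
#include <cstdint>
using uptr = uintptr_t;

static const uptr kMinStackFrameSizeLog = 6;
static const uptr kNumberOfSizeClasses = 11;
static const uptr kFlagsOffset = 4096;

static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
  uptr t = kNumberOfSizeClasses - 1 - class_id;
  const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
  return ((all_ones >> t) << t) << (stack_size_log - 15);
}

int main() {
  uptr log = 16;
  uptr flags = 1UL << (log + 1 - kMinStackFrameSizeLog);  // 2048 flag bytes
  uptr frames = (1ULL << log) * kNumberOfSizeClasses;     // 11 * 64 KB
  std::printf("RequiredSize(16) = %zu bytes\n",
              (size_t)(kFlagsOffset + flags + frames));   // 727040
  for (uptr c = 0; c < 3; c++)                            // 0, 1024, 1536
    std::printf("FlagsOffset(16, %zu) = %zu\n", (size_t)c,
                (size_t)FlagsOffset(log, c));
  return 0;
}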

View File

@ -30,8 +30,6 @@ struct Flags {
// Lower value may reduce memory usage but increase the chance of
// false negatives.
int quarantine_size;
// If set, uses in-process symbolizer from common sanitizer runtime.
bool symbolize;
// Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
int verbosity;
// Size (in bytes) of redzones around heap objects.
@ -45,8 +43,6 @@ struct Flags {
int report_globals;
// If set, attempts to catch initialization order issues.
bool check_initialization_order;
// Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// If set, uses custom wrappers and replacements for libc string functions
// to find more errors.
bool replace_str;
@ -54,11 +50,13 @@ struct Flags {
bool replace_intrin;
// Used on Mac only.
bool mac_ignore_invalid_free;
// ASan allocator flag. See asan_allocator.cc.
bool use_fake_stack;
// ASan allocator flag. Sets the maximal size of allocation request
// that would return memory filled with zero bytes.
int max_malloc_fill_size;
// Enables stack-use-after-return checking at run-time.
bool detect_stack_use_after_return;
// The minimal fake stack size log.
int uar_stack_size_log;
// ASan allocator flag. max_malloc_fill_size is the maximal number of bytes
// that will be filled with malloc_fill_byte on malloc.
int max_malloc_fill_size, malloc_fill_byte;
// Override exit status if something was reported.
int exitcode;
// If set, user may manually mark memory regions as poisoned or unpoisoned.
@ -69,6 +67,8 @@ struct Flags {
int sleep_before_dying;
// If set, registers ASan custom segv handler.
bool handle_segv;
// If set, allows the user to register a segv handler even if ASan registers
// one.
bool allow_user_segv_handler;
// If set, uses alternate stack for signal handling.
bool use_sigaltstack;
// Allow the users to work around the bug in Nvidia drivers prior to 295.*.
@ -89,18 +89,10 @@ struct Flags {
// Allow the tool to re-exec the program. This may interfere badly with the
// debugger.
bool allow_reexec;
// Strips this prefix from file paths in error reports.
const char *strip_path_prefix;
// If set, prints not only thread creation stacks for threads in error report,
// but also thread creation stacks for threads that created those threads,
// etc. up to main thread.
bool print_full_thread_history;
// ASan will write logs to "log_path.pid" instead of stderr.
const char *log_path;
// Use fast (frame-pointer-based) unwinder on fatal errors (if available).
bool fast_unwind_on_fatal;
// Use fast (frame-pointer-based) unwinder on malloc/free (if available).
bool fast_unwind_on_malloc;
// Poison (or not) the heap memory on [de]allocation. Zero value is useful
// for benchmarking the allocator or instrumentator.
bool poison_heap;
@ -108,9 +100,18 @@ struct Flags {
bool alloc_dealloc_mismatch;
// Use stack depot instead of storing stacks in the redzones.
bool use_stack_depot;
// If true, assume that memcmp(p1, p2, n) always reads n bytes before
// comparing p1 and p2.
bool strict_memcmp;
// If true, assume that dynamic initializers can never access globals from
// other modules, even if the latter are already initialized.
bool strict_init_order;
};
Flags *flags();
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
void InitializeFlags(Flags *f, const char *env);
} // namespace __asan

View File

@ -12,11 +12,14 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
namespace __asan {
@ -30,15 +33,26 @@ struct ListOfGlobals {
static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_globals;
static ListOfGlobals *list_of_all_globals;
static ListOfGlobals *list_of_dynamic_init_globals;
void PoisonRedZones(const Global &g) {
static const int kDynamicInitGlobalsInitialCapacity = 512;
struct DynInitGlobal {
Global g;
bool initialized;
};
typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
// Lazy-initialized and never deleted.
static VectorOfGlobals *dynamic_init_globals;
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
FastPoisonShadow(g->beg, g->size_with_redzone, value);
}
ALWAYS_INLINE void PoisonRedZones(const Global &g) {
uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
PoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
kAsanGlobalRedzoneMagic);
FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
kAsanGlobalRedzoneMagic);
if (g.size != aligned_size) {
// partial right redzone
PoisonShadowPartialRightRedzone(
FastPoisonShadowPartialRightRedzone(
g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
g.size % SHADOW_GRANULARITY,
SHADOW_GRANULARITY,
@ -46,6 +60,12 @@ void PoisonRedZones(const Global &g) {
}
}
static void ReportGlobal(const Global &g, const char *prefix) {
Report("%s Global: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
prefix, (void*)g.beg, g.size, g.size_with_redzone, g.name,
g.module_name, g.has_dynamic_init);
}
bool DescribeAddressIfGlobal(uptr addr, uptr size) {
if (!flags()->report_globals) return false;
BlockingMutexLock lock(&mu_for_globals);
@ -53,8 +73,7 @@ bool DescribeAddressIfGlobal(uptr addr, uptr size) {
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
if (flags()->report_globals >= 2)
Report("Search Global: beg=%p size=%zu name=%s\n",
(void*)g.beg, g.size, (char*)g.name);
ReportGlobal(g, "Search");
res |= DescribeAddressRelativeToGlobal(addr, size, g);
}
return res;
@ -66,24 +85,26 @@ bool DescribeAddressIfGlobal(uptr addr, uptr size) {
static void RegisterGlobal(const Global *g) {
CHECK(asan_inited);
if (flags()->report_globals >= 2)
Report("Added Global: beg=%p size=%zu/%zu name=%s dyn.init=%zu\n",
(void*)g->beg, g->size, g->size_with_redzone, g->name,
g->has_dynamic_init);
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
PoisonRedZones(*g);
if (flags()->poison_heap)
PoisonRedZones(*g);
ListOfGlobals *l =
(ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals));
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
if (g->has_dynamic_init) {
l = (ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals));
l->g = g;
l->next = list_of_dynamic_init_globals;
list_of_dynamic_init_globals = l;
if (dynamic_init_globals == 0) {
void *mem = allocator_for_globals.Allocate(sizeof(VectorOfGlobals));
dynamic_init_globals = new(mem)
VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
}
DynInitGlobal dyn_global = { *g, false };
dynamic_init_globals->push_back(dyn_global);
}
}
@ -93,34 +114,26 @@ static void UnregisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
PoisonShadow(g->beg, g->size_with_redzone, 0);
if (flags()->poison_heap)
PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
// implementation. It might not be worth doing anyway.
}
// Poison all shadow memory for a single global.
static void PoisonGlobalAndRedzones(const Global *g) {
CHECK(asan_inited);
CHECK(flags()->check_initialization_order);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->report_globals >= 3)
Printf("DynInitPoison : %s\n", g->name);
PoisonShadow(g->beg, g->size_with_redzone, kAsanInitializationOrderMagic);
}
static void UnpoisonGlobal(const Global *g) {
CHECK(asan_inited);
CHECK(flags()->check_initialization_order);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->report_globals >= 3)
Printf("DynInitUnpoison: %s\n", g->name);
PoisonShadow(g->beg, g->size_with_redzone, 0);
PoisonRedZones(*g);
void StopInitOrderChecking() {
BlockingMutexLock lock(&mu_for_globals);
if (!flags()->check_initialization_order || !dynamic_init_globals)
return;
flags()->check_initialization_order = false;
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
}
}
} // namespace __asan
@ -151,31 +164,47 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// when all dynamically initialized globals are unpoisoned. This method
// poisons all global variables not defined in this TU, so that a dynamic
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) {
if (!flags()->check_initialization_order) return;
CHECK(list_of_dynamic_init_globals);
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
!flags()->poison_heap)
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
CHECK(module_name);
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
bool from_current_tu = false;
// The list looks like:
// a => ... => b => last_addr => ... => first_addr => c => ...
// The globals of the current TU reside between last_addr and first_addr.
for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next) {
if (l->g->beg == last_addr)
from_current_tu = true;
if (!from_current_tu)
PoisonGlobalAndRedzones(l->g);
if (l->g->beg == first_addr)
from_current_tu = false;
if (flags()->report_globals >= 3)
Printf("DynInitPoison module: %s\n", module_name);
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
if (dyn_g.initialized)
continue;
if (g->module_name != module_name)
PoisonShadowForGlobal(g, kAsanInitializationOrderMagic);
else if (!strict_init_order)
dyn_g.initialized = true;
}
CHECK(!from_current_tu);
}
// This method runs immediately after dynamic initialization in each TU, when
// all dynamically initialized globals except for those defined in the current
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order) return;
if (!flags()->check_initialization_order ||
!flags()->poison_heap)
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next)
UnpoisonGlobal(l->g);
// FIXME: Optionally report that we're unpoisoning globals from a module.
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
const Global *g = &dyn_g.g;
if (!dyn_g.initialized) {
// Unpoison the whole global.
PoisonShadowForGlobal(g, 0);
// Poison redzones back.
PoisonRedZones(*g);
}
}
}
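For context, a hedged sketch of the per-TU calls an instrumented module conceptually makes around its dynamic initializers (the two __asan_* entry points are the real interface above; the module string and init body are placeholders; note that globals are matched against module_name by pointer, so the same string literal used at registration must be passed here):

extern "C" void __asan_before_dynamic_init(const char *module_name);
extern "C" void __asan_after_dynamic_init();

static void run_this_tus_initializers() { /* C++ dynamic initializers */ }

// Registered as a module constructor by the instrumented compiler.
static void asan_dynamic_init_wrapper() {
  __asan_before_dynamic_init("example_tu.cc");  // poison other modules' globals
  run_this_tus_initializers();                  // may only touch own globals
  __asan_after_dynamic_init();                  // unpoison everything again
}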

View File

@ -12,22 +12,14 @@
#ifndef ASAN_INTERCEPTED_FUNCTIONS_H
#define ASAN_INTERCEPTED_FUNCTIONS_H
#include "asan_internal.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include <stdarg.h>
#include <stddef.h>
using __sanitizer::uptr;
// Use macros to describe whether a specific function should be
// intercepted on a given platform.
#if !defined(_WIN32)
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_MLOCKX 1
@ -35,290 +27,51 @@ using __sanitizer::uptr;
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_MLOCKX 0
#endif
#if defined(__linux__)
#if SANITIZER_LINUX
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !defined(__APPLE__)
#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif
#if defined(__linux__) && !defined(ANDROID)
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
#endif
#if !defined(ANDROID) && !defined(_WIN32)
#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
#else
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
#endif
#if !defined(_WIN32)
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
#if ASAN_HAS_EXCEPTIONS && !defined(_WIN32)
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
#endif
#define INTERPOSE_FUNCTION(function) \
{ reinterpret_cast<const uptr>(WRAP(function)), \
reinterpret_cast<const uptr>(function) }
#define INTERPOSE_FUNCTION_2(function, wrapper) \
{ reinterpret_cast<const uptr>(wrapper), \
reinterpret_cast<const uptr>(function) }
struct interpose_substitution {
const uptr replacement;
const uptr original;
};
#define INTERPOSER(func) __attribute__((used)) \
const interpose_substitution substitution_##func[] \
__attribute__((section("__DATA, __interpose"))) = { \
INTERPOSE_FUNCTION(func), \
}
#define INTERPOSER_2(func, wrapper) __attribute__((used)) \
const interpose_substitution substitution_##func[] \
__attribute__((section("__DATA, __interpose"))) = { \
INTERPOSE_FUNCTION_2(func, wrapper), \
}
#define DECLARE_FUNCTION_AND_WRAPPER(ret_type, func, ...) \
ret_type func(__VA_ARGS__); \
ret_type WRAP(func)(__VA_ARGS__); \
INTERPOSER(func)
// Use extern declarations of intercepted functions on Mac and Windows
// to avoid including system headers.
#if defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL))
extern "C" {
// signal.h
# if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
struct sigaction;
DECLARE_FUNCTION_AND_WRAPPER(int, sigaction, int sig,
const struct sigaction *act,
struct sigaction *oldact);
DECLARE_FUNCTION_AND_WRAPPER(void*, signal, int signum, void *handler);
# endif
// setjmp.h
DECLARE_FUNCTION_AND_WRAPPER(void, longjmp, void *env, int value);
# if ASAN_INTERCEPT__LONGJMP
DECLARE_FUNCTION_AND_WRAPPER(void, _longjmp, void *env, int value);
# endif
# if ASAN_INTERCEPT_SIGLONGJMP
DECLARE_FUNCTION_AND_WRAPPER(void, siglongjmp, void *env, int value);
# endif
# if ASAN_INTERCEPT___CXA_THROW
DECLARE_FUNCTION_AND_WRAPPER(void, __cxa_throw, void *a, void *b, void *c);
# endif
// string.h / strings.h
DECLARE_FUNCTION_AND_WRAPPER(int, memcmp,
const void *a1, const void *a2, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(void*, memmove,
void *to, const void *from, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(void*, memcpy,
void *to, const void *from, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(void*, memset, void *block, int c, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(char*, strchr, const char *str, int c);
DECLARE_FUNCTION_AND_WRAPPER(char*, strcat, /* NOLINT */
char *to, const char* from);
DECLARE_FUNCTION_AND_WRAPPER(char*, strncat,
char *to, const char* from, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(char*, strcpy, /* NOLINT */
char *to, const char* from);
DECLARE_FUNCTION_AND_WRAPPER(char*, strncpy,
char *to, const char* from, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(int, strcmp, const char *s1, const char* s2);
DECLARE_FUNCTION_AND_WRAPPER(int, strncmp,
const char *s1, const char* s2, uptr size);
DECLARE_FUNCTION_AND_WRAPPER(uptr, strlen, const char *s);
# if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP
DECLARE_FUNCTION_AND_WRAPPER(int, strcasecmp, const char *s1, const char *s2);
DECLARE_FUNCTION_AND_WRAPPER(int, strncasecmp,
const char *s1, const char *s2, uptr n);
# endif
# if ASAN_INTERCEPT_STRDUP
DECLARE_FUNCTION_AND_WRAPPER(char*, strdup, const char *s);
# endif
# if ASAN_INTERCEPT_STRNLEN
DECLARE_FUNCTION_AND_WRAPPER(uptr, strnlen, const char *s, uptr maxlen);
# endif
# if ASAN_INTERCEPT_INDEX
char* index(const char *string, int c);
INTERPOSER_2(index, WRAP(strchr));
# endif
// stdlib.h
DECLARE_FUNCTION_AND_WRAPPER(int, atoi, const char *nptr);
DECLARE_FUNCTION_AND_WRAPPER(long, atol, const char *nptr); // NOLINT
DECLARE_FUNCTION_AND_WRAPPER(long, strtol, const char *nptr, char **endptr, int base); // NOLINT
# if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
DECLARE_FUNCTION_AND_WRAPPER(long long, atoll, const char *nptr); // NOLINT
DECLARE_FUNCTION_AND_WRAPPER(long long, strtoll, const char *nptr, char **endptr, int base); // NOLINT
# endif
// unistd.h
# if SANITIZER_INTERCEPT_READ
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, read, int fd, void *buf, SIZE_T count);
# endif
# if SANITIZER_INTERCEPT_PREAD
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread, int fd, void *buf,
SIZE_T count, OFF_T offset);
# endif
# if SANITIZER_INTERCEPT_PREAD64
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
SIZE_T count, OFF64_T offset);
# endif
# if SANITIZER_INTERCEPT_WRITE
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, write, int fd, void *ptr, SIZE_T count);
# endif
# if SANITIZER_INTERCEPT_PWRITE
DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pwrite,
int fd, void *ptr, SIZE_T count, OFF_T offset);
# endif
# if ASAN_INTERCEPT_MLOCKX
// mlock/munlock
DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, SIZE_T len);
DECLARE_FUNCTION_AND_WRAPPER(int, mlockall, int flags);
DECLARE_FUNCTION_AND_WRAPPER(int, munlockall, void);
# endif
// Windows threads.
# if defined(_WIN32)
__declspec(dllimport)
void* __stdcall CreateThread(void *sec, uptr st, void* start,
void *arg, DWORD fl, DWORD *id);
# endif
// Posix threads.
# if ASAN_INTERCEPT_PTHREAD_CREATE
DECLARE_FUNCTION_AND_WRAPPER(int, pthread_create,
void *thread, void *attr,
void *(*start_routine)(void*), void *arg);
# endif
# if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
DECLARE_FUNCTION_AND_WRAPPER(void *, localtime, unsigned long *timep);
DECLARE_FUNCTION_AND_WRAPPER(void *, localtime_r, unsigned long *timep,
void *result);
DECLARE_FUNCTION_AND_WRAPPER(void *, gmtime, unsigned long *timep);
DECLARE_FUNCTION_AND_WRAPPER(void *, gmtime_r, unsigned long *timep,
void *result);
DECLARE_FUNCTION_AND_WRAPPER(char *, ctime, unsigned long *timep);
DECLARE_FUNCTION_AND_WRAPPER(char *, ctime_r, unsigned long *timep,
char *result);
DECLARE_FUNCTION_AND_WRAPPER(char *, asctime, void *tm);
DECLARE_FUNCTION_AND_WRAPPER(char *, asctime_r, void *tm, char *result);
# endif
// stdio.h
# if SANITIZER_INTERCEPT_SCANF
DECLARE_FUNCTION_AND_WRAPPER(int, vscanf, const char *format, va_list ap);
DECLARE_FUNCTION_AND_WRAPPER(int, vsscanf, const char *str, const char *format,
va_list ap);
DECLARE_FUNCTION_AND_WRAPPER(int, vfscanf, void *stream, const char *format,
va_list ap);
DECLARE_FUNCTION_AND_WRAPPER(int, scanf, const char *format, ...);
DECLARE_FUNCTION_AND_WRAPPER(int, fscanf,
void* stream, const char *format, ...);
DECLARE_FUNCTION_AND_WRAPPER(int, sscanf, // NOLINT
const char *str, const char *format, ...);
# endif
# if defined(__APPLE__)
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;
typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef void* dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async_f,
dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_sync_f,
dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_after_f,
dispatch_time_t when, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_barrier_async_f,
dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async_f,
dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
# if !defined(MISSING_BLOCKS_SUPPORT)
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async,
dispatch_group_t dg,
dispatch_queue_t dq, void (^work)(void));
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async,
dispatch_queue_t dq, void (^work)(void));
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_after,
dispatch_queue_t dq, void (^work)(void));
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_event_handler,
dispatch_source_t ds, void (^work)(void));
DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void (^work)(void));
# endif // MISSING_BLOCKS_SUPPORT
typedef void malloc_zone_t;
typedef size_t vm_size_t;
DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned flags);
DECLARE_FUNCTION_AND_WRAPPER(malloc_zone_t *, malloc_default_zone, void);
DECLARE_FUNCTION_AND_WRAPPER(
malloc_zone_t *, malloc_default_purgeable_zone, void);
DECLARE_FUNCTION_AND_WRAPPER(void, malloc_make_purgeable, void *ptr);
DECLARE_FUNCTION_AND_WRAPPER(int, malloc_make_nonpurgeable, void *ptr);
DECLARE_FUNCTION_AND_WRAPPER(void, malloc_set_zone_name,
malloc_zone_t *zone, const char *name);
DECLARE_FUNCTION_AND_WRAPPER(void *, malloc, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void, free, void *ptr);
DECLARE_FUNCTION_AND_WRAPPER(void *, realloc, void *ptr, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void *, calloc, size_t nmemb, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(void *, valloc, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(size_t, malloc_good_size, size_t size);
DECLARE_FUNCTION_AND_WRAPPER(int, posix_memalign,
void **memptr, size_t alignment, size_t size);
#if 0
DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_prepare, void);
DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_parent, void);
DECLARE_FUNCTION_AND_WRAPPER(void, _malloc_fork_child, void);
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT___CXA_ATEXIT 1
#else
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
# endif // __APPLE__
} // extern "C"
#endif // defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL))
#endif // ASAN_INTERCEPTED_FUNCTIONS_H

View File

@ -15,10 +15,10 @@
#include "asan_intercepted_functions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_libc.h"
@ -42,15 +42,16 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
__asan_region_is_poisoned(__offset, __size)) { \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
GET_CURRENT_PC_BP_SP; \
__asan_report_error(pc, bp, sp, __offset, isWrite, __size); \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size); \
} \
} while (0)
#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true);
#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true)
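// Editor's sketch (not part of this diff): how an interceptor typically uses
// these macros -- validate the whole range against shadow before touching
// memory. 'wrapped_read' is a hypothetical name; real interceptors may check
// the actual byte count after the call instead:
//   INTERCEPTOR(SSIZE_T, wrapped_read, int fd, void *buf, SIZE_T count) {
//     ASAN_WRITE_RANGE(buf, count);  // buf will be written into
//     return REAL(wrapped_read)(fd, buf, count);
//   }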
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report an error in this case.
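// Editor's sketch of the overlap test such a report rests on (a minimal
// version; the helper name is illustrative): half-open ranges
// [b1, b1+s1) and [b2, b2+s2) overlap iff each starts before the other ends:
//   static inline bool RangesOverlap(const char *b1, uptr s1,
//                                    const char *b2, uptr s2) {
//     return !(b1 + s1 <= b2 || b2 + s2 <= b1);
//   }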
@ -86,9 +87,9 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
}
void SetThreadName(const char *name) {
AsanThread *t = asanThreadRegistry().GetCurrent();
AsanThread *t = GetCurrentThread();
if (t)
t->summary()->set_name(name);
asanThreadRegistry().SetThreadName(t->tid(), name);
}
} // namespace __asan
@ -96,40 +97,76 @@ void SetThreadName(const char *name) {
// ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#define COMMON_INTERCEPTOR_UNPOISON_PARAM(ctx, count) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
do { \
ctx = 0; \
(void)ctx; \
ENSURE_ASAN_INITED(); \
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
do { \
if (asan_init_is_running) return REAL(func)(__VA_ARGS__); \
ctx = 0; \
(void) ctx; \
ENSURE_ASAN_INITED(); \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (false)
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (false)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \
} while (false)
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
do { \
} while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
asanThreadRegistry().SetCurrent(t);
return t->ThreadStart();
SetCurrentThread(t);
return t->ThreadStart(GetTid());
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
extern "C" int pthread_attr_getdetachstate(void *attr, int *v);
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
asanThreadRegistry().RegisterThread(t);
int detached = 0;
if (attr != 0)
pthread_attr_getdetachstate(attr, &detached);
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(start_routine, arg);
CreateThreadContextArgs args = { t, &stack };
asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
return REAL(pthread_create)(thread, attr, asan_thread_start, t);
}
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum)) {
if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
@ -137,15 +174,15 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
if (!AsanInterceptsSignal(signum)) {
if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
}
#elif ASAN_POSIX
#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact);
struct sigaction *oldact)
#endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if ASAN_INTERCEPT_SWAPCONTEXT
@ -215,13 +252,15 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
// Since asan maps 16T of RAM, mlock is completely unfriendly to asan.
// All functions return 0 (success).
static void MlockIsUnsupported() {
static bool printed = 0;
static bool printed = false;
if (printed) return;
printed = true;
Printf("INFO: AddressSanitizer ignores mlock/mlockall/munlock/munlockall\n");
if (flags()->verbosity > 0) {
Printf("INFO: AddressSanitizer ignores "
"mlock/mlockall/munlock/munlockall\n");
}
}
extern "C" {
INTERCEPTOR(int, mlock, const void *addr, uptr len) {
MlockIsUnsupported();
return 0;
@ -241,36 +280,56 @@ INTERCEPTOR(int, munlockall, void) {
MlockIsUnsupported();
return 0;
}
} // extern "C"
static inline int CharCmp(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
static inline int CharCaseCmp(unsigned char c1, unsigned char c2) {
int c1_low = ToLower(c1);
int c2_low = ToLower(c2);
return c1_low - c2_low;
}
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
if (!asan_inited) return internal_memcmp(a1, a2, size);
ENSURE_ASAN_INITED();
unsigned char c1 = 0, c2 = 0;
const unsigned char *s1 = (const unsigned char*)a1;
const unsigned char *s2 = (const unsigned char*)a2;
uptr i;
for (i = 0; i < size; i++) {
c1 = s1[i];
c2 = s2[i];
if (c1 != c2) break;
if (flags()->replace_intrin) {
if (flags()->strict_memcmp) {
// Check the entire regions even if the first bytes of the buffers are
// different.
ASAN_READ_RANGE(a1, size);
ASAN_READ_RANGE(a2, size);
// Fallthrough to REAL(memcmp) below.
} else {
unsigned char c1 = 0, c2 = 0;
const unsigned char *s1 = (const unsigned char*)a1;
const unsigned char *s2 = (const unsigned char*)a2;
uptr i;
for (i = 0; i < size; i++) {
c1 = s1[i];
c2 = s2[i];
if (c1 != c2) break;
}
ASAN_READ_RANGE(s1, Min(i + 1, size));
ASAN_READ_RANGE(s2, Min(i + 1, size));
return CharCmp(c1, c2);
}
}
ASAN_READ_RANGE(s1, Min(i + 1, size));
ASAN_READ_RANGE(s2, Min(i + 1, size));
return CharCmp(c1, c2);
return REAL(memcmp(a1, a2, size));
}
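// Editor's note (illustrative): with strict_memcmp=0 and
//   char a[100] = {1}; char b[100] = {2}; memcmp(a, b, 100);
// the loop above stops at i == 0, so only one byte of each buffer is
// checked (Min(i + 1, size) == 1); strict_memcmp=1 validates all 100 bytes
// of both buffers before delegating to REAL(memcmp).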
#define MEMMOVE_BODY { \
if (!asan_inited) return internal_memmove(to, from, size); \
if (asan_init_is_running) { \
return REAL(memmove)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(from, size); \
ASAN_WRITE_RANGE(to, size); \
} \
return internal_memmove(to, from, size); \
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) MEMMOVE_BODY
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
#if !SANITIZER_MAC
if (!asan_inited) return internal_memcpy(to, from, size);
// memcpy is called during __asan_init() from the internals
// of printf(...).
@ -287,24 +346,19 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
// Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
// Interposing of resolver functions is broken on Mac OS 10.7 and 10.8, so
// calling REAL(memcpy) here leads to infinite recursion.
// See also http://code.google.com/p/address-sanitizer/issues/detail?id=116.
return internal_memcpy(to, from, size);
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
if (!asan_inited) return internal_memmove(to, from, size);
if (asan_init_is_running) {
return REAL(memmove)(to, from, size);
}
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
// Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
// See also http://code.google.com/p/address-sanitizer/issues/detail?id=116.
return internal_memmove(to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
// calls. If we just disable error reporting with
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
MEMMOVE_BODY
#endif // !SANITIZER_MAC
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
@ -341,7 +395,12 @@ INTERCEPTOR(char*, strchr, const char *str, int c) {
INTERCEPTOR(char*, index, const char *string, int c)
ALIAS(WRAPPER_NAME(strchr));
# else
# if SANITIZER_MAC
DECLARE_REAL(char*, index, const char *string, int c)
OVERRIDE_FUNCTION(index, strchr);
# else
DEFINE_REAL(char*, index, const char *string, int c)
# endif
# endif
#endif // ASAN_INTERCEPT_INDEX
@ -383,26 +442,8 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
return REAL(strncat)(to, from, size);
}
INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
if (!asan_inited) return internal_strcmp(s1, s2);
if (asan_init_is_running) {
return REAL(strcmp)(s1, s2);
}
ENSURE_ASAN_INITED();
unsigned char c1, c2;
uptr i;
for (i = 0; ; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
ASAN_READ_RANGE(s1, i + 1);
ASAN_READ_RANGE(s2, i + 1);
return CharCmp(c1, c2);
}
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if defined(__APPLE__)
#if SANITIZER_MAC
if (!asan_inited) return REAL(strcpy)(to, from); // NOLINT
#endif
// strcpy is called from malloc_default_purgeable_zone()
@ -422,21 +463,16 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
#if defined(__APPLE__)
// FIXME: because internal_strdup() uses InternalAlloc(), which currently
// just calls malloc() on Mac, we can't use internal_strdup() with the
// dynamic runtime. We can remove the call to REAL(strdup) once InternalAlloc
// starts using mmap() instead.
// See also http://code.google.com/p/address-sanitizer/issues/detail?id=123.
if (!asan_inited) return REAL(strdup)(s);
#endif
if (!asan_inited) return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
uptr length = REAL(strlen)(s);
ASAN_READ_RANGE(s, length + 1);
}
return REAL(strdup)(s);
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
REAL(memcpy)(new_mem, s, length + 1);
return reinterpret_cast<char*>(new_mem);
}
#endif
@ -455,54 +491,13 @@ INTERCEPTOR(uptr, strlen, const char *s) {
return length;
}
#if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP
INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
ENSURE_ASAN_INITED();
unsigned char c1, c2;
uptr i;
for (i = 0; ; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
INTERCEPTOR(uptr, wcslen, const wchar_t *s) {
uptr length = REAL(wcslen)(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
}
ASAN_READ_RANGE(s1, i + 1);
ASAN_READ_RANGE(s2, i + 1);
return CharCaseCmp(c1, c2);
}
INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, uptr n) {
ENSURE_ASAN_INITED();
unsigned char c1 = 0, c2 = 0;
uptr i;
for (i = 0; i < n; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
ASAN_READ_RANGE(s1, Min(i + 1, n));
ASAN_READ_RANGE(s2, Min(i + 1, n));
return CharCaseCmp(c1, c2);
}
#endif // ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP
INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
if (!asan_inited) return internal_strncmp(s1, s2, size);
// strncmp is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
return REAL(strncmp)(s1, s2, size);
}
ENSURE_ASAN_INITED();
unsigned char c1 = 0, c2 = 0;
uptr i;
for (i = 0; i < size; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
ASAN_READ_RANGE(s1, Min(i + 1, size));
ASAN_READ_RANGE(s2, Min(i + 1, size));
return CharCmp(c1, c2);
return length;
}
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
@ -532,7 +527,7 @@ static inline bool IsValidStrtolBase(int base) {
}
static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
CHECK(endptr != 0);
CHECK(endptr);
if (nptr == *endptr) {
// No digits were found in the strtol call; we need to find out the last
// symbol accessed by strtoll on our own.
@ -563,7 +558,7 @@ INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
}
INTERCEPTOR(int, atoi, const char *nptr) {
#if defined(__APPLE__)
#if SANITIZER_MAC
if (!asan_inited) return REAL(atoi)(nptr);
#endif
ENSURE_ASAN_INITED();
@ -582,7 +577,7 @@ INTERCEPTOR(int, atoi, const char *nptr) {
}
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
#if defined(__APPLE__)
#if SANITIZER_MAC
if (!asan_inited) return REAL(atol)(nptr);
#endif
ENSURE_ASAN_INITED();
@ -631,22 +626,47 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
static void AtCxaAtexit(void *unused) {
(void)unused;
StopInitOrderChecking();
}
#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
ENSURE_ASAN_INITED();
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
REAL(__cxa_atexit)(AtCxaAtexit, 0, 0);
return res;
}
#endif // ASAN_INTERCEPT___CXA_ATEXIT
#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) do { \
if (!INTERCEPT_FUNCTION(name) && flags()->verbosity > 0) \
Report("AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
#if defined(_WIN32)
#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD flags, void* tid) {
DWORD thr_flags, void* tid) {
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
asanThreadRegistry().RegisterThread(t);
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(start_routine, arg);
CreateThreadContextArgs args = { t, &stack };
bool detached = false; // FIXME: how can we determine it on Windows?
asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
return REAL(CreateThread)(security, stack_size,
asan_thread_start, t, flags, tid);
asan_thread_start, t, thr_flags, tid);
}
namespace __asan {
@ -663,9 +683,6 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
was_called_once = true;
#if defined(__APPLE__)
return;
#else
SANITIZER_COMMON_INTERCEPTORS_INIT;
// Intercept mem* functions.
@ -679,16 +696,11 @@ void InitializeAsanInterceptors() {
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strchr);
ASAN_INTERCEPT_FUNC(strcmp);
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
ASAN_INTERCEPT_FUNC(strlen);
ASAN_INTERCEPT_FUNC(wcslen);
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncmp);
ASAN_INTERCEPT_FUNC(strncpy);
#if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP
ASAN_INTERCEPT_FUNC(strcasecmp);
ASAN_INTERCEPT_FUNC(strncasecmp);
#endif
#if ASAN_INTERCEPT_STRDUP
ASAN_INTERCEPT_FUNC(strdup);
#endif
@ -741,15 +753,19 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(pthread_create);
#endif
// Intercept atexit function.
#if ASAN_INTERCEPT___CXA_ATEXIT
ASAN_INTERCEPT_FUNC(__cxa_atexit);
#endif
// Some Windows-specific interceptors.
#if defined(_WIN32)
#if SANITIZER_WINDOWS
InitializeWindowsInterceptors();
#endif
if (flags()->verbosity > 0) {
Report("AddressSanitizer: libc interceptors initialized\n");
}
#endif // __APPLE__
}
} // namespace __asan

@ -23,8 +23,13 @@ extern "C" {
// Every time the asan ABI changes we also change the version number in this
// name. Objects built with an incompatible asan ABI version
// will not link with the run-time.
void __asan_init_v1() SANITIZER_INTERFACE_ATTRIBUTE;
#define __asan_init __asan_init_v1
// Changes between ABI versions:
// v1=>v2: added 'module_name' to __asan_global
// v2=>v3: stack frame description (created by the compiler)
// contains the function PC as the third field (see
// DescribeAddressIfStack).
SANITIZER_INTERFACE_ATTRIBUTE void __asan_init_v3();
#define __asan_init __asan_init_v3
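// Editor's sketch of the effect (names illustrative, not part of this diff):
// each instrumented module gets a constructor that calls __asan_init, which
// the #define above binds to the versioned symbol, so objects built against
// a different ABI version fail to link:
//   __attribute__((constructor)) static void asan_module_ctor() {
//     __asan_init();  // resolves to __asan_init_v3
//   }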
// This structure describes an instrumented global variable.
struct __asan_global {
@ -32,102 +37,92 @@ extern "C" {
uptr size; // The original size of the global.
uptr size_with_redzone; // The size with the redzone.
const char *name; // Name as a C string.
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
};
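// Editor's note: a hypothetical descriptor the compiler might emit for
// "int g;" in foo.cc with a 60-byte right redzone (values illustrative):
//   __asan_global g_desc = {
//     (uptr)&g, // beg
//     4,        // size
//     64,       // size_with_redzone
//     "g",      // name
//     "foo.cc", // module_name
//     0         // has_dynamic_init
//   };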
// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
void __asan_register_globals(__asan_global *globals, uptr n)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_unregister_globals(__asan_global *globals, uptr n)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_register_globals(__asan_global *globals, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unregister_globals(__asan_global *globals, uptr n);
// These two functions should be called before and after dynamic initializers
// run, respectively. They should be called with parameters describing all
// dynamically initialized globals defined in the calling TU.
void __asan_before_dynamic_init(uptr first_addr, uptr last_addr)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_after_dynamic_init()
SANITIZER_INTERFACE_ATTRIBUTE;
// These two functions are used by the instrumented code in the
// use-after-return mode. __asan_stack_malloc allocates size bytes of
// fake stack and __asan_stack_free poisons it. real_stack is a pointer to
// the real stack region.
uptr __asan_stack_malloc(uptr size, uptr real_stack)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_stack_free(uptr ptr, uptr size, uptr real_stack)
SANITIZER_INTERFACE_ATTRIBUTE;
// of a single module run, respectively.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_before_dynamic_init(const char *module_name);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
// function exits.
void __asan_poison_stack_memory(uptr addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_unpoison_stack_memory(uptr addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_stack_memory(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_stack_memory(uptr addr, uptr size);
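// Editor's sketch of the instrumentation (emitted calls illustrative): for
//   void f() { { int x; use(&x); } /* ... */ }
// the compiler emits, under use-after-scope:
//   __asan_poison_stack_memory((uptr)&x, sizeof(x));   // at x's closing brace
//   __asan_unpoison_stack_memory((uptr)&x, sizeof(x)); // before f returns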
// Performs cleanup before a NoReturn function. Must be called before things
// like _exit and execl to avoid false positives on the stack.
void __asan_handle_no_return() SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_no_return();
void __asan_poison_memory_region(void const volatile *addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_unpoison_memory_region(void const volatile *addr, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_memory_region(void const volatile *addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
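// Editor's note: a typical user-side example (not part of this header) --
// poisoning the unused tail of a buffer so stray accesses are reported:
//   char buf[128];
//   uptr used = 40;
//   __asan_poison_memory_region(buf + used, sizeof(buf) - used);
//   // accesses to buf[used..127] are now reported ...
//   __asan_unpoison_memory_region(buf + used, sizeof(buf) - used);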
bool __asan_address_is_poisoned(void const volatile *addr)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
bool __asan_address_is_poisoned(void const volatile *addr);
uptr __asan_region_is_poisoned(uptr beg, uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_region_is_poisoned(uptr beg, uptr size);
void __asan_describe_address(uptr addr)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_describe_address(uptr addr);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, bool is_write, uptr access_size)
SANITIZER_INTERFACE_ATTRIBUTE;
uptr addr, bool is_write, uptr access_size);
int __asan_set_error_exit_code(int exit_code)
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_set_death_callback(void (*callback)(void))
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_set_error_report_callback(void (*callback)(const char*))
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_death_callback(void (*callback)(void));
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_error_report_callback(void (*callback)(const char*));
/* OPTIONAL */ void __asan_on_error()
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
int out_size)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
int out_size);
uptr __asan_get_estimated_allocated_size(uptr size)
SANITIZER_INTERFACE_ATTRIBUTE;
bool __asan_get_ownership(const void *p)
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_allocated_size(const void *p)
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_current_allocated_bytes()
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_heap_size()
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_free_bytes()
SANITIZER_INTERFACE_ATTRIBUTE;
uptr __asan_get_unmapped_bytes()
SANITIZER_INTERFACE_ATTRIBUTE;
void __asan_print_accumulated_stats()
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_estimated_allocated_size(uptr size);
/* OPTIONAL */ const char* __asan_default_options()
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE bool __asan_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_unmapped_bytes();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
/* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
/* OPTIONAL */ void __asan_free_hook(void *ptr)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_free_hook(void *ptr);
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H

@ -19,39 +19,8 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_libc.h"
#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported by AddressSanitizer"
#endif
#define ASAN_DEFAULT_FAILURE_EXITCODE 1
#if defined(__linux__)
# define ASAN_LINUX 1
#else
# define ASAN_LINUX 0
#endif
#if defined(__APPLE__)
# define ASAN_MAC 1
#else
# define ASAN_MAC 0
#endif
#if defined(_WIN32)
# define ASAN_WINDOWS 1
#else
# define ASAN_WINDOWS 0
#endif
#if defined(__ANDROID__) || defined(ANDROID)
# define ASAN_ANDROID 1
#else
# define ASAN_ANDROID 0
#endif
#define ASAN_POSIX (ASAN_LINUX || ASAN_MAC)
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# error "The AddressSanitizer run-time should not be"
" instrumented by AddressSanitizer"
@ -61,7 +30,7 @@
// If set, asan will install its own SEGV signal handler.
#ifndef ASAN_NEEDS_SEGV
# if ASAN_ANDROID == 1
# if SANITIZER_ANDROID == 1
# define ASAN_NEEDS_SEGV 0
# else
# define ASAN_NEEDS_SEGV 1
@ -90,7 +59,7 @@
#endif
#ifndef ASAN_USE_PREINIT_ARRAY
# define ASAN_USE_PREINIT_ARRAY (ASAN_LINUX && !ASAN_ANDROID)
# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
#endif
// All internal functions in asan reside inside the __asan namespace
@ -121,6 +90,7 @@ void UnsetAlternateSignalStack();
void InstallSignalHandlers();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
// Wrapper for TLS/TSD.
void AsanTSDInit(void (*destructor)(void *tsd));
@ -129,24 +99,14 @@ void AsanTSDSet(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
// asan_poisoning.cc
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
// Poisons the shadow memory for "redzone_size" bytes starting from
// "addr + size".
void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value);
// Platform-specific options.
#ifdef __APPLE__
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // __APPLE__
#endif // SANITIZER_MAC
// Add convenient macro for interface functions that may be represented as
// weak hooks.

@ -9,12 +9,13 @@
//
// Linux-specific details.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@ -29,7 +30,7 @@
#include <unistd.h>
#include <unwind.h>
#if !ASAN_ANDROID
#if !SANITIZER_ANDROID
// FIXME: where to get ucontext on Android?
#include <sys/ucontext.h>
#endif
@ -48,7 +49,7 @@ void *AsanDoesNotSupportStaticLinkage() {
}
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if ASAN_ANDROID
#if SANITIZER_ANDROID
*pc = *sp = *bp = 0;
#elif defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
@ -86,6 +87,11 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
# elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
*sp = ucontext->uc_mcontext.gregs[29];
#else
# error "Unsupported arch"
#endif
@ -99,25 +105,7 @@ void AsanPlatformThreadInit() {
// Nothing here for now.
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
#if defined(__arm__) || \
defined(__powerpc__) || defined(__powerpc64__) || \
defined(__sparc__)
fast = false;
#endif
if (!fast)
return stack->SlowUnwindStack(pc, max_s);
stack->size = 0;
stack->trace[0] = pc;
if (max_s > 1) {
stack->max_size = max_s;
if (!asan_inited) return;
if (AsanThread *t = asanThreadRegistry().GetCurrent())
stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
}
}
#if !ASAN_ANDROID
#if !SANITIZER_ANDROID
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
ucontext_t *ucp = (ucontext_t*)context;
*stack = (uptr)ucp->uc_stack.ss_sp;
@ -131,4 +119,4 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
} // namespace __asan
#endif // __linux__
#endif // SANITIZER_LINUX

@ -10,7 +10,8 @@
// Mac-specific details.
//===----------------------------------------------------------------------===//
#ifdef __APPLE__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "asan_interceptors.h"
#include "asan_internal.h"
@ -18,7 +19,7 @@
#include "asan_mapping.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include <crt_externs.h> // for _NSGetArgv
@ -50,15 +51,17 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif // SANITIZER_WORDSIZE
}
int GetMacosVersion() {
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
MacosVersion GetMacosVersionInternal() {
int mib[2] = { CTL_KERN, KERN_OSRELEASE };
char version[100];
uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
// Get the version length.
CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1);
CHECK(len < maxlen);
CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1);
CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1);
CHECK_LT(len, maxlen);
CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1);
switch (version[0]) {
case '9': return MACOS_VERSION_LEOPARD;
case '1': {
@ -66,6 +69,7 @@ int GetMacosVersion() {
case '0': return MACOS_VERSION_SNOW_LEOPARD;
case '1': return MACOS_VERSION_LION;
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
default: return MACOS_VERSION_UNKNOWN;
}
}
@ -73,6 +77,18 @@ int GetMacosVersion() {
}
}
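// Editor's note: the caching below is a lock-free publish pattern; the worst
// case is a benign race in which two threads both call
// GetMacosVersionInternal(), which is idempotent, so both store the same value.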
MacosVersion GetMacosVersion() {
atomic_uint32_t *cache =
reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
MacosVersion result =
static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
if (result == MACOS_VERSION_UNINITIALIZED) {
result = GetMacosVersionInternal();
atomic_store(cache, result, memory_order_release);
}
return result;
}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
// into memmove$VARIANT$sse42.
@ -227,18 +243,6 @@ bool AsanInterceptsSignal(int signum) {
void AsanPlatformThreadInit() {
}
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
(void)fast;
stack->size = 0;
stack->trace[0] = pc;
if ((max_s) > 1) {
stack->max_size = max_s;
if (!asan_inited) return;
if (AsanThread *t = asanThreadRegistry().GetCurrent())
stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
}
}
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
@ -286,32 +290,16 @@ typedef struct {
u32 parent_tid;
} asan_block_context_t;
// We use extern declarations of libdispatch functions here instead
// of including <dispatch/dispatch.h>. This header is not present on
// Mac OS X Leopard and earlier, and although we don't expect ASan to
// work on legacy systems, it's bad to break the build of
// LLVM compiler-rt there.
extern "C" {
void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func);
} // extern "C"
static ALWAYS_INLINE
ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = asanThreadRegistry().GetCurrent();
AsanThread *t = GetCurrentThread();
if (!t) {
t = AsanThread::Create(parent_tid, 0, 0, stack);
asanThreadRegistry().RegisterThread(t);
t = AsanThread::Create(0, 0);
CreateThreadContextArgs args = { t, stack };
asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
t->Init();
asanThreadRegistry().SetCurrent(t);
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
}
@ -345,7 +333,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
asan_ctxt->block = ctxt;
asan_ctxt->func = func;
asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
return asan_ctxt;
}
@ -411,7 +399,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
#define GET_ASAN_BLOCK(work) \
void (^asan_block)(void); \
int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
int parent_tid = GetCurrentTidOrInvalid(); \
asan_block = ^(void) { \
GET_STACK_TRACE_THREAD; \
asan_register_worker_thread(parent_tid, &stack); \
@ -449,4 +437,4 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
}
#endif
#endif // __APPLE__
#endif // SANITIZER_MAC

@ -34,12 +34,14 @@ typedef struct __CFRuntimeBase {
#endif
} CFRuntimeBase;
enum {
MACOS_VERSION_UNKNOWN = 0,
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
MACOS_VERSION_LEOPARD,
MACOS_VERSION_SNOW_LEOPARD,
MACOS_VERSION_LION,
MACOS_VERSION_MOUNTAIN_LION
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS
};
// Used by asan_malloc_mac.cc and asan_mac.cc
@ -47,7 +49,7 @@ extern "C" void __CFInitialize();
namespace __asan {
int GetMacosVersion();
MacosVersion GetMacosVersion();
void MaybeReplaceCFAllocator();
} // namespace __asan

@ -11,15 +11,16 @@
// We simply define functions like malloc, free, realloc, etc.
// They will replace the corresponding libc functions automagically.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
#include "asan_thread_registry.h"
#if ASAN_ANDROID
#if SANITIZER_ANDROID
DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
DECLARE_REAL_AND_INTERCEPTOR(void*, calloc, uptr nmemb, uptr size)
@ -144,4 +145,4 @@ INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
}
#endif // __linux__
#endif // SANITIZER_LINUX

@ -10,12 +10,14 @@
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//
#ifdef __APPLE__
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include <AvailabilityMacros.h>
#include <CoreFoundation/CFBase.h>
#include <dlfcn.h>
#include <malloc/malloc.h>
#include <sys/mman.h>
#include "asan_allocator.h"
#include "asan_interceptors.h"
@ -24,7 +26,6 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.
@ -40,10 +41,19 @@ INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
vm_size_t start_size, unsigned zone_flags) {
if (!asan_inited) __asan_init();
GET_STACK_TRACE_MALLOC;
uptr page_size = GetPageSizeCached();
uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
malloc_zone_t *new_zone =
(malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack);
(malloc_zone_t*)asan_memalign(page_size, allocated_size,
&stack, FROM_MALLOC);
internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
new_zone->zone_name = NULL; // The name will be changed anyway.
if (GetMacosVersion() >= MACOS_VERSION_LION) {
// Prevent the client app from overwriting the zone contents.
// Library functions that need to modify the zone will set PROT_WRITE on it.
// This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
mprotect(new_zone, allocated_size, PROT_READ);
}
return new_zone;
}
@ -282,7 +292,7 @@ void mi_force_unlock(malloc_zone_t *zone) {
void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
AsanMallocStats malloc_stats;
asanThreadRegistry().FillMallocStatistics(&malloc_stats);
FillMallocStatistics(&malloc_stats);
CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats));
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}
@ -344,4 +354,4 @@ void ReplaceSystemMalloc() {
}
} // namespace __asan
#endif // __APPLE__
#endif // SANITIZER_MAC

@ -9,7 +9,9 @@
//
// Windows-specific malloc interception.
//===----------------------------------------------------------------------===//
#ifdef _WIN32
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include "asan_allocator.h"
#include "asan_interceptors.h"
@ -28,11 +30,13 @@ using namespace __asan; // NOLINT
// revisited in the future.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void free(void *ptr) {
GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack, FROM_MALLOC);
}
SANITIZER_INTERFACE_ATTRIBUTE
void _free_dbg(void* ptr, int) {
free(ptr);
}
@ -41,38 +45,46 @@ void cfree(void *ptr) {
CHECK(!"cfree() should not be used on Windows?");
}
SANITIZER_INTERFACE_ATTRIBUTE
void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void* _malloc_dbg(size_t size, int , const char*, int) {
return malloc(size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *calloc(size_t nmemb, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
return calloc(n, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
return calloc(nmemb, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *realloc(void *ptr, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
CHECK(!"_realloc_dbg should not exist!");
return 0;
}
SANITIZER_INTERFACE_ATTRIBUTE
void* _recalloc(void* p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
@ -82,6 +94,7 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
return realloc(p, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
size_t _msize(void *ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);

@ -47,6 +47,20 @@
// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/MIPS mapping:
// || `[0x2aaa8000, 0xffffffff]` || HighMem ||
// || `[0x0fffd000, 0x2aaa7fff]` || HighShadow ||
// || `[0x0bffd000, 0x0fffcfff]` || ShadowGap ||
// || `[0x0aaa8000, 0x0bffcfff]` || LowShadow ||
// || `[0x00000000, 0x0aaa7fff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
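// Editor's note: a worked example of Shadow = (Mem >> Scale) + Offset with
// the x86_64 defaults above (scale 3, offset kDefaultShort64bitShadowOffset):
//   Mem    = 0x000000400000
//   Shadow = (0x400000 >> 3) + 0x7fff8000
//          = 0x00080000 + 0x7fff8000 = 0x80078000
// One shadow byte describes 8 (= 1 << 3) application bytes.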
#if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
@ -54,22 +68,23 @@ extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
# define SHADOW_SCALE (__asan_mapping_scale)
# define SHADOW_OFFSET (__asan_mapping_offset)
#else
# if ASAN_ANDROID
# define SHADOW_SCALE (3)
# define SHADOW_SCALE kDefaultShadowScale
# if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
# else
# define SHADOW_SCALE (3)
# if SANITIZER_WORDSIZE == 32
# define SHADOW_OFFSET (1 << 29)
# if defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# else
# if defined(__powerpc64__)
# define SHADOW_OFFSET (1ULL << 41)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# else
# if ASAN_MAC
# define SHADOW_OFFSET (1ULL << 44)
# else
# define SHADOW_OFFSET 0x7fff8000ULL
# endif
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
# endif
@ -131,7 +146,6 @@ static uptr kHighMemEnd = 0x7fffffffffffULL;
static uptr kMidMemBeg = 0x3000000000ULL;
static uptr kMidMemEnd = 0x4fffffffffULL;
#else
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
#endif

@ -27,7 +27,7 @@ using namespace __asan; // NOLINT
// On Android new() goes through malloc interceptors.
// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131.
#if !ASAN_ANDROID
#if !SANITIZER_ANDROID
// Fake std::nothrow_t to avoid including <new>.
namespace std {
@ -38,6 +38,14 @@ struct nothrow_t {};
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of which will have its own implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
@ -49,10 +57,26 @@ INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
OPERATOR_NEW_BODY(FROM_NEW);
}
INTERCEPTOR(void *, _Znam, size_t size) {
OPERATOR_NEW_BODY(FROM_NEW_BR);
}
INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW);
}
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW_BR);
}
#endif
#define OPERATOR_DELETE_BODY(type) \
GET_STACK_TRACE_FREE;\
asan_free(ptr, &stack, type);
#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR_ATTRIBUTE
@ -64,4 +88,19 @@ INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR(void, _ZdaPv, void *ptr) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
#endif
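// Editor's note: the mangled names above follow the Itanium C++ ABI on LP64:
//   _Znwm  -> operator new(unsigned long)
//   _Znam  -> operator new[](unsigned long)
//   _ZdlPv -> operator delete(void*)
//   _ZdaPv -> operator delete[](void*)
// and the ...RKSt9nothrow_t variants take 'std::nothrow_t const&'.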

@ -10,9 +10,7 @@
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
@ -20,11 +18,11 @@ namespace __asan {
void PoisonShadow(uptr addr, uptr size, u8 value) {
if (!flags()->poison_heap) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1;
CHECK(REAL(memset) != 0);
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
CHECK(REAL(memset));
FastPoisonShadow(addr, size, value);
}
void PoisonShadowPartialRightRedzone(uptr addr,
@ -33,20 +31,10 @@ void PoisonShadowPartialRightRedzone(uptr addr,
u8 value) {
if (!flags()->poison_heap) return;
CHECK(AddrIsAlignedByGranularity(addr));
u8 *shadow = (u8*)MemToShadow(addr);
for (uptr i = 0; i < redzone_size;
i += SHADOW_GRANULARITY, shadow++) {
if (i + SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
} else if (i >= size) {
*shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
*shadow = size - i; // first size-i bytes are addressable
}
}
CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}
struct ShadowSegmentEndpoint {
u8 *chunk;
s8 offset; // in [0, SHADOW_GRANULARITY)
@ -179,6 +167,55 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
return 0;
}
#define CHECK_SMALL_REGION(p, size, isWrite) \
do { \
uptr __p = reinterpret_cast<uptr>(p); \
uptr __size = size; \
if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \
__asan::AddressIsPoisoned(__p + __size - 1))) { \
GET_CURRENT_PC_BP_SP; \
uptr __bad = __asan_region_is_poisoned(__p, __size); \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size);\
} \
} while (false); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
CHECK_SMALL_REGION(p, sizeof(*p), false);
return *p;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
CHECK_SMALL_REGION(p, sizeof(*p), false);
return *p;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
CHECK_SMALL_REGION(p, sizeof(*p), false);
return *p;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
CHECK_SMALL_REGION(p, sizeof(*p), true);
*p = x;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
CHECK_SMALL_REGION(p, sizeof(*p), true);
*p = x;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
CHECK_SMALL_REGION(p, sizeof(*p), true);
*p = x;
}
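// Editor's sketch (illustrative, not part of this diff): instrumented code
// doing a potentially unaligned 4-byte load is routed through the hook so
// both end bytes are validated against shadow first:
//   u32 ReadUnaligned32(const void *p) {
//     return __sanitizer_unaligned_load32((const uu32 *)p);
//   }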
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {

@ -0,0 +1,57 @@
//===-- asan_poisoning.h ----------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mapping.h"
namespace __asan {
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
// Poisons the shadow memory for "redzone_size" bytes starting from
// "addr + size".
void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value);
// Fast versions of PoisonShadow and PoisonShadowPartialRightRedzone that
// assume that memory addresses are properly aligned. Use in
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
DCHECK(flags()->poison_heap);
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
if (i + SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
} else if (i >= size) {
*shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
// first size-i bytes are addressable
*shadow = static_cast<u8>(size - i);
}
}
}
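// Editor's note: a worked example with SHADOW_GRANULARITY == 8, size == 13,
// redzone_size == 32, value == 0xfe: the loop above writes four shadow
// bytes -- 0x00 (bytes 0-7 fully addressable), 0x05 (only 5 of bytes 8-15
// addressable), 0xfe, 0xfe (bytes 16-31 unaddressable).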
} // namespace __asan

@ -9,14 +9,15 @@
//
// Posix-specific details.
//===----------------------------------------------------------------------===//
#if defined(__linux__) || defined(__APPLE__)
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@ -40,7 +41,7 @@ static void MaybeInstallSigaction(int signum,
sigact.sa_sigaction = handler;
sigact.sa_flags = SA_SIGINFO;
if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK(0 == REAL(sigaction)(signum, &sigact, 0));
CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0));
if (flags()->verbosity >= 1) {
Report("Installed the sigaction for signal %d\n", signum);
}
@ -57,7 +58,7 @@ static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) {
void SetAlternateSignalStack() {
stack_t altstack, oldstack;
CHECK(0 == sigaltstack(0, &oldstack));
CHECK_EQ(0, sigaltstack(0, &oldstack));
// If the alternate stack is already in place, do nothing.
if ((oldstack.ss_flags & SS_DISABLE) == 0) return;
// TODO(glider): the mapped stack should have the MAP_STACK flag in the
@ -67,10 +68,10 @@ void SetAlternateSignalStack() {
altstack.ss_sp = base;
altstack.ss_flags = 0;
altstack.ss_size = kAltStackSize;
CHECK(0 == sigaltstack(&altstack, 0));
CHECK_EQ(0, sigaltstack(&altstack, 0));
if (flags()->verbosity > 0) {
Report("Alternative stack for T%d set: [%p,%p)\n",
asanThreadRegistry().GetCurrentTidOrInvalid(),
GetCurrentTidOrInvalid(),
altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size);
}
}
@ -80,7 +81,7 @@ void UnsetAlternateSignalStack() {
altstack.ss_sp = 0;
altstack.ss_flags = SS_DISABLE;
altstack.ss_size = 0;
CHECK(0 == sigaltstack(&altstack, &oldstack));
CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
@ -100,7 +101,7 @@ static bool tsd_key_inited = false;
void AsanTSDInit(void (*destructor)(void *tsd)) {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
CHECK(0 == pthread_key_create(&tsd_key, destructor));
CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
void *AsanTSDGet() {
@ -115,4 +116,4 @@ void AsanTSDSet(void *tsd) {
} // namespace __asan
#endif // __linux__ || __APPLE_
#endif // SANITIZER_LINUX || SANITIZER_MAC

@ -16,9 +16,11 @@
// On Linux, we force __asan_init to be called before anyone else
// by placing it into .preinit_array section.
// FIXME: do we have anything like this on Mac?
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
__attribute__((section(".preinit_array"), used))
void (*__asan_preinit)(void) =__asan_init;
#elif defined(_WIN32) && defined(_DLL)
void (*__local_asan_preinit)(void) = __asan_init;
#elif SANITIZER_WINDOWS && defined(_DLL)
// On Windows, when using dynamic CRT (/MD), we can put a pointer
// to __asan_init into the global list of C initializers.
// See crt0dat.c in the CRT sources for the details.

@ -15,8 +15,8 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@ -42,15 +42,6 @@ void AppendToErrorMessageBuffer(const char *buffer) {
}
// ---------------------- Decorator ------------------------------ {{{1
bool PrintsToTtyCached() {
static int cached = 0;
static bool prints_to_tty;
if (!cached) { // Ok wrt threads since we are printing only from one thread.
prints_to_tty = PrintsToTty();
cached = 1;
}
return prints_to_tty;
}
class Decorator: private __sanitizer::AnsiColorDecorator {
public:
Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
@ -111,7 +102,7 @@ static void PrintShadowBytes(const char *before, u8 *bytes,
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before = p == guilty ? "[" :
p - 1 == guilty ? "" : " ";
(p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
PrintShadowByte(before, *p, after);
}
@ -123,12 +114,12 @@ static void PrintLegend() {
"application bytes):\n", (int)SHADOW_GRANULARITY);
PrintShadowByte(" Addressable: ", 0);
Printf(" Partially addressable: ");
for (uptr i = 1; i < SHADOW_GRANULARITY; i++)
for (u8 i = 1; i < SHADOW_GRANULARITY; i++)
PrintShadowByte("", i, " ");
Printf("\n");
PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic);
PrintShadowByte(" Heap righ redzone: ", kAsanHeapRightRedzoneMagic);
PrintShadowByte(" Freed Heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic);
PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic);
PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic);
PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic);
@ -173,19 +164,34 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
}
}
static void DescribeThread(AsanThread *t) {
if (t)
DescribeThread(t->context());
}
// ---------------------- Address Descriptions ------------------- {{{1
static bool IsASCII(unsigned char c) {
return /*0x00 <= c &&*/ c <= 0x7F;
}
static const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use a heuristic
// approach to check if the name should be demangled.
return (name[0] == '_' && name[1] == 'Z' && &getSymbolizer)
? getSymbolizer()->Demangle(name)
: name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
static void PrintGlobalNameIfASCII(const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
if (!IsASCII(*(unsigned char*)p)) return;
unsigned char c = *(unsigned char*)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char*)(g.beg + g.size - 1) != 0) return;
Printf(" '%s' is ascii string '%s'\n", g.name, (char*)g.beg);
if (*(char*)(g.beg + g.size - 1) != '\0') return;
Printf(" '%s' is ascii string '%s'\n",
MaybeDemangleGlobalName(g.name), (char*)g.beg);
}
bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
@ -206,8 +212,8 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
// Can it happen?
Printf("%p is located %zd bytes inside", (void*)addr, addr - g.beg);
}
Printf(" of global variable '%s' (0x%zx) of size %zu\n",
g.name, g.beg, g.size);
Printf(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size);
Printf("%s", d.EndLocation());
PrintGlobalNameIfASCII(g);
return true;
@ -234,57 +240,149 @@ bool DescribeAddressIfShadow(uptr addr) {
return false;
}
// Return " (thread_name) " or an empty string if the name is empty.
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len) {
const char *name = t->name;
if (name[0] == '\0') return "";
buff[0] = 0;
internal_strncat(buff, " (", 3);
internal_strncat(buff, name, buff_len - 4);
internal_strncat(buff, ")", 2);
return buff;
}
const char *ThreadNameWithParenthesis(u32 tid, char buff[],
uptr buff_len) {
if (tid == kInvalidTid) return "";
asanThreadRegistry().CheckLocked();
AsanThreadContext *t = GetThreadContextByTidLocked(tid);
return ThreadNameWithParenthesis(t, buff, buff_len);
}
void PrintAccessAndVarIntersection(const char *var_name,
uptr var_beg, uptr var_size,
uptr addr, uptr access_size,
uptr prev_var_end, uptr next_var_beg) {
uptr var_end = var_beg + var_size;
uptr addr_end = addr + access_size;
const char *pos_descr = 0;
// If the variable [var_beg, var_end) is the nearest variable to the
// current memory access, indicate it in the log.
if (addr >= var_beg) {
if (addr_end <= var_end)
pos_descr = "is inside"; // May happen if this is a use-after-return.
else if (addr < var_end)
pos_descr = "partially overflows";
else if (addr_end <= next_var_beg &&
next_var_beg - addr_end >= addr - var_end)
pos_descr = "overflows";
} else {
if (addr_end > var_beg)
pos_descr = "partially underflows";
else if (addr >= prev_var_end &&
addr - prev_var_end >= var_beg - addr_end)
pos_descr = "underflows";
}
Printf(" [%zd, %zd) '%s'", var_beg, var_beg + var_size, var_name);
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
Printf("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.EndLocation());
} else {
Printf("\n");
}
}
struct StackVarDescr {
uptr beg;
uptr size;
const char *name_pos;
uptr name_len;
};
bool DescribeAddressIfStack(uptr addr, uptr access_size) {
AsanThread *t = asanThreadRegistry().FindThreadByStackAddress(addr);
AsanThread *t = FindThreadByStackAddress(addr);
if (!t) return false;
const sptr kBufSize = 4095;
const uptr kBufSize = 4095;
char buf[kBufSize];
uptr offset = 0;
const char *frame_descr = t->GetFrameNameByAddr(addr, &offset);
uptr frame_pc = 0;
char tname[128];
const char *frame_descr = t->GetFrameNameByAddr(addr, &offset, &frame_pc);
#ifdef __powerpc64__
// On PowerPC64, the address of a function actually points to a
// three-doubleword data structure with the first field containing
// the address of the function's code.
frame_pc = *reinterpret_cast<uptr *>(frame_pc);
#endif
// This string is created by the compiler and has the following form:
// "FunctioName n alloc_1 alloc_2 ... alloc_n"
// "n alloc_1 alloc_2 ... alloc_n"
// where alloc_i looks like "offset size len ObjectName ".
CHECK(frame_descr);
// Report the function name and the offset.
const char *name_end = internal_strchr(frame_descr, ' ');
CHECK(name_end);
buf[0] = 0;
internal_strncat(buf, frame_descr,
Min(kBufSize,
static_cast<sptr>(name_end - frame_descr)));
Decorator d;
Printf("%s", d.Location());
Printf("Address %p is located at offset %zu "
"in frame <%s> of T%d's stack:\n",
(void*)addr, offset, Demangle(buf), t->tid());
Printf("Address %p is located in stack of thread T%d%s "
"at offset %zu in frame\n",
addr, t->tid(),
ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)),
offset);
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
// The symbolizer may print more than one frame if inlining was involved.
// The frame numbers may be different from those in the stack trace printed
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from an entirely different place
// (e.g. use-after-scope, or a different thread's stack).
StackTrace alloca_stack;
alloca_stack.trace[0] = frame_pc + 16;
alloca_stack.size = 1;
Printf("%s", d.EndLocation());
PrintStack(&alloca_stack);
// Report the number of stack objects.
char *p;
uptr n_objects = internal_simple_strtoll(name_end, &p, 10);
CHECK(n_objects > 0);
uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
CHECK_GT(n_objects, 0);
Printf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
InternalScopedBuffer<StackVarDescr> vars(n_objects);
for (uptr i = 0; i < n_objects; i++) {
uptr beg, size;
sptr len;
beg = internal_simple_strtoll(p, &p, 10);
size = internal_simple_strtoll(p, &p, 10);
len = internal_simple_strtoll(p, &p, 10);
if (beg <= 0 || size <= 0 || len < 0 || *p != ' ') {
uptr len;
beg = (uptr)internal_simple_strtoll(p, &p, 10);
size = (uptr)internal_simple_strtoll(p, &p, 10);
len = (uptr)internal_simple_strtoll(p, &p, 10);
if (beg == 0 || size == 0 || *p != ' ') {
Printf("AddressSanitizer can't parse the stack frame "
"descriptor: |%s|\n", frame_descr);
break;
}
p++;
buf[0] = 0;
internal_strncat(buf, p, Min(kBufSize, len));
vars[i].beg = beg;
vars[i].size = size;
vars[i].name_pos = p;
vars[i].name_len = len;
p += len;
Printf(" [%zu, %zu) '%s'\n", beg, beg + size, buf);
}
for (uptr i = 0; i < n_objects; i++) {
buf[0] = 0;
internal_strncat(buf, vars[i].name_pos,
static_cast<uptr>(Min(kBufSize, vars[i].name_len)));
uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
PrintAccessAndVarIntersection(buf, vars[i].beg, vars[i].size,
offset, access_size,
prev_var_end, next_var_beg);
}
Printf("HINT: this may be a false positive if your program uses "
"some custom stack unwind mechanism or swapcontext\n"
" (longjmp and C++ exceptions *are* supported)\n");
DescribeThread(t->summary());
DescribeThread(t);
return true;
}
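
The frame-descriptor format parsed above ("n off_1 size_1 len_1 Name_1 ...") can be exercised in isolation. A rough sketch with strtoul standing in for internal_simple_strtoll; the descriptor contents are made up for illustration:

#include <cstdio>
#include <cstdlib>

typedef unsigned long uptr;

int main() {
  // "n" objects, each described as "offset size name_len Name ".
  const char *descr = "2 32 10 3 buf 64 20 3 arr ";
  char *p;
  uptr n_objects = strtoul(descr, &p, 10);
  for (uptr i = 0; i < n_objects; i++) {
    uptr beg  = strtoul(p, &p, 10);
    uptr size = strtoul(p, &p, 10);
    uptr len  = strtoul(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      printf("can't parse descriptor: |%s|\n", descr);
      return 1;
    }
    p++;  // Skip the space before the name.
    printf("  [%lu, %lu) '%.*s'\n", beg, beg + size, (int)len, p);
    p += len;
  }
}
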
@ -312,65 +410,43 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
Printf("%s", d.EndLocation());
}
// Return " (thread_name) " or an empty string if the name is empty.
const char *ThreadNameWithParenthesis(AsanThreadSummary *t, char buff[],
uptr buff_len) {
const char *name = t->name();
if (*name == 0) return "";
buff[0] = 0;
internal_strncat(buff, " (", 3);
internal_strncat(buff, name, buff_len - 4);
internal_strncat(buff, ")", 2);
return buff;
}
const char *ThreadNameWithParenthesis(u32 tid, char buff[],
uptr buff_len) {
if (tid == kInvalidTid) return "";
AsanThreadSummary *t = asanThreadRegistry().FindByTid(tid);
return ThreadNameWithParenthesis(t, buff, buff_len);
}
void DescribeHeapAddress(uptr addr, uptr access_size) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return;
DescribeAccessToHeapChunk(chunk, addr, access_size);
CHECK(chunk.AllocTid() != kInvalidTid);
AsanThreadSummary *alloc_thread =
asanThreadRegistry().FindByTid(chunk.AllocTid());
asanThreadRegistry().CheckLocked();
AsanThreadContext *alloc_thread =
GetThreadContextByTidLocked(chunk.AllocTid());
StackTrace alloc_stack;
chunk.GetAllocStack(&alloc_stack);
AsanThread *t = asanThreadRegistry().GetCurrent();
CHECK(t);
char tname[128];
Decorator d;
AsanThreadContext *free_thread = 0;
if (chunk.FreeTid() != kInvalidTid) {
AsanThreadSummary *free_thread =
asanThreadRegistry().FindByTid(chunk.FreeTid());
free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
free_thread->tid(),
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
StackTrace free_stack;
chunk.GetFreeStack(&free_stack);
PrintStack(&free_stack);
Printf("%spreviously allocated by thread T%d%s here:%s\n",
d.Allocation(), alloc_thread->tid(),
d.Allocation(), alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
PrintStack(&alloc_stack);
DescribeThread(t->summary());
DescribeThread(free_thread);
DescribeThread(alloc_thread);
} else {
Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
PrintStack(&alloc_stack);
DescribeThread(t->summary());
DescribeThread(alloc_thread);
}
PrintStack(&alloc_stack);
DescribeThread(GetCurrentThread());
if (free_thread)
DescribeThread(free_thread);
DescribeThread(alloc_thread);
}
void DescribeAddress(uptr addr, uptr access_size) {
@ -388,26 +464,27 @@ void DescribeAddress(uptr addr, uptr access_size) {
// ------------------- Thread description -------------------- {{{1
void DescribeThread(AsanThreadSummary *summary) {
CHECK(summary);
void DescribeThread(AsanThreadContext *context) {
CHECK(context);
asanThreadRegistry().CheckLocked();
// No need to announce the main thread.
if (summary->tid() == 0 || summary->announced()) {
if (context->tid == 0 || context->announced) {
return;
}
summary->set_announced(true);
context->announced = true;
char tname[128];
Printf("Thread T%d%s", summary->tid(),
ThreadNameWithParenthesis(summary->tid(), tname, sizeof(tname)));
Printf("Thread T%d%s", context->tid,
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
Printf(" created by T%d%s here:\n",
summary->parent_tid(),
ThreadNameWithParenthesis(summary->parent_tid(),
context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid,
tname, sizeof(tname)));
PrintStack(summary->stack());
PrintStack(&context->stack);
// Recursively describe the parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadSummary *parent_summary =
asanThreadRegistry().FindByTid(summary->parent_tid());
DescribeThread(parent_summary);
AsanThreadContext *parent_context =
GetThreadContextByTidLocked(context->parent_tid);
DescribeThread(parent_context);
}
}
@ -426,7 +503,7 @@ class ScopedInErrorReport {
// they are defined as no-return.
Report("AddressSanitizer: while reporting a bug found another one."
"Ignoring.\n");
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
u32 current_tid = GetCurrentTidOrInvalid();
if (current_tid != reporting_thread_tid) {
// ASan found two bugs in different threads simultaneously. Sleep
// long enough to make sure that the thread which started to print
@ -438,24 +515,20 @@ class ScopedInErrorReport {
internal__exit(flags()->exitcode);
}
ASAN_ON_ERROR();
reporting_thread_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
// recursive reports.
asanThreadRegistry().Lock();
CommonSanitizerReportMutex.Lock();
reporting_thread_tid = GetCurrentTidOrInvalid();
Printf("===================================================="
"=============\n");
if (reporting_thread_tid != kInvalidTid) {
// We started reporting an error message. Stop using the fake stack
// in case we call an instrumented function from a symbolizer.
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
CHECK(curr_thread);
curr_thread->fake_stack().StopUsingFakeStack();
}
}
// Destructor is NORETURN, as functions that report errors are.
NORETURN ~ScopedInErrorReport() {
// Make sure the current thread is announced.
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
if (curr_thread) {
DescribeThread(curr_thread->summary());
}
DescribeThread(GetCurrentThread());
// Print memory stats.
if (flags()->print_stats)
__asan_print_accumulated_stats();
@ -469,13 +542,15 @@ class ScopedInErrorReport {
static void ReportSummary(const char *error_type, StackTrace *stack) {
if (!stack->size) return;
if (IsSymbolizerAvailable()) {
if (&getSymbolizer && getSymbolizer()->IsAvailable()) {
AddressInfo ai;
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
SymbolizeCode(stack->trace[0], &ai, 1);
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
getSymbolizer()->SymbolizeCode(pc, &ai, 1);
ReportErrorSummary(error_type,
StripPathPrefix(ai.file, flags()->strip_path_prefix),
StripPathPrefix(ai.file,
common_flags()->strip_path_prefix),
ai.line, ai.function);
}
// FIXME: do we need to print anything at all if there is no symbolizer?
@ -488,7 +563,7 @@ void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
" (pc %p sp %p bp %p T%d)\n",
(void*)addr, (void*)pc, (void*)sp, (void*)bp,
asanThreadRegistry().GetCurrentTidOrInvalid());
GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
Printf("AddressSanitizer can not provide additional info.\n");
GET_STACK_TRACE_FATAL(pc, bp);
@ -500,7 +575,13 @@ void ReportDoubleFree(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting double-free on %p:\n", addr);
char tname[128];
u32 curr_tid = GetCurrentTidOrInvalid();
Report("ERROR: AddressSanitizer: attempting double-free on %p in "
"thread T%d%s:\n",
addr, curr_tid,
ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
@ -511,8 +592,11 @@ void ReportFreeNotMalloced(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
char tname[128];
u32 curr_tid = GetCurrentTidOrInvalid();
Report("ERROR: AddressSanitizer: attempting free on address "
"which was not malloc()-ed: %p\n", addr);
"which was not malloc()-ed: %p in thread T%d%s\n", addr,
curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
PrintStack(stack);
DescribeHeapAddress(addr, 1);
@ -678,7 +762,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
bug_descr, (void*)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
u32 curr_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
u32 curr_tid = GetCurrentTidOrInvalid();
char tname[128];
Printf("%s%s of size %zu at %p thread T%d%s%s\n",
d.Access(),
@ -712,6 +796,6 @@ void __asan_describe_address(uptr addr) {
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
void __asan_on_error() {}
#endif

View File

@ -27,7 +27,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size);
// Determines memory type on its own.
void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadSummary *summary);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);

View File

@ -11,17 +11,21 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_interface_internal.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "lsan/lsan_common.h"
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
namespace __asan {
@ -62,13 +66,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
}
// -------------------------- Flags ------------------------- {{{1
static const int kDeafultMallocContextSize = 30;
static const int kDefaultMallocContextSize = 30;
static Flags asan_flags;
Flags *flags() {
return &asan_flags;
}
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
@ -86,28 +86,32 @@ static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() {
}
static void ParseFlagsFromString(Flags *f, const char *str) {
ParseCommonFlagsFromString(str);
CHECK((uptr)common_flags()->malloc_context_size <= kStackTraceMax);
ParseFlag(str, &f->quarantine_size, "quarantine_size");
ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->verbosity, "verbosity");
ParseFlag(str, &f->redzone, "redzone");
CHECK(f->redzone >= 16);
CHECK_GE(f->redzone, 16);
CHECK(IsPowerOfTwo(f->redzone));
ParseFlag(str, &f->debug, "debug");
ParseFlag(str, &f->report_globals, "report_globals");
ParseFlag(str, &f->check_initialization_order, "initialization_order");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
CHECK((uptr)f->malloc_context_size <= kStackTraceMax);
ParseFlag(str, &f->check_initialization_order, "check_initialization_order");
ParseFlag(str, &f->replace_str, "replace_str");
ParseFlag(str, &f->replace_intrin, "replace_intrin");
ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free");
ParseFlag(str, &f->use_fake_stack, "use_fake_stack");
ParseFlag(str, &f->detect_stack_use_after_return,
"detect_stack_use_after_return");
ParseFlag(str, &f->uar_stack_size_log, "uar_stack_size_log");
ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size");
ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte");
ParseFlag(str, &f->exitcode, "exitcode");
ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning");
ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying");
ParseFlag(str, &f->handle_segv, "handle_segv");
ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler");
ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack");
ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size");
ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit");
@ -116,37 +120,47 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->print_legend, "print_legend");
ParseFlag(str, &f->atexit, "atexit");
ParseFlag(str, &f->disable_core, "disable_core");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(str, &f->allow_reexec, "allow_reexec");
ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
ParseFlag(str, &f->log_path, "log_path");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->poison_heap, "poison_heap");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
ParseFlag(str, &f->strict_memcmp, "strict_memcmp");
ParseFlag(str, &f->strict_init_order, "strict_init_order");
}
void InitializeFlags(Flags *f, const char *env) {
internal_memset(f, 0, sizeof(*f));
CommonFlags *cf = common_flags();
cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf->symbolize = true;
cf->malloc_context_size = kDefaultMallocContextSize;
cf->fast_unwind_on_fatal = false;
cf->fast_unwind_on_malloc = true;
cf->strip_path_prefix = "";
cf->handle_ioctl = false;
cf->log_path = 0;
cf->detect_leaks = false;
cf->leak_check_at_exit = true;
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->symbolize = false;
f->verbosity = 0;
f->redzone = ASAN_ALLOCATOR_VERSION == 2 ? 16 : (ASAN_LOW_MEMORY) ? 64 : 128;
f->redzone = 16;
f->debug = false;
f->report_globals = 1;
f->check_initialization_order = true;
f->malloc_context_size = kDeafultMallocContextSize;
f->check_initialization_order = false;
f->replace_str = true;
f->replace_intrin = true;
f->mac_ignore_invalid_free = false;
f->use_fake_stack = true;
f->max_malloc_fill_size = 0;
f->detect_stack_use_after_return = false; // Also needs the compiler flag.
f->uar_stack_size_log = 0;
f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
f->malloc_fill_byte = 0xbe;
f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
f->allow_user_poisoning = true;
f->sleep_before_dying = 0;
f->handle_segv = ASAN_NEEDS_SEGV;
f->allow_user_segv_handler = false;
f->use_sigaltstack = false;
f->check_malloc_usable_size = true;
f->unmap_shadow_on_exit = false;
@ -155,15 +169,15 @@ void InitializeFlags(Flags *f, const char *env) {
f->print_legend = true;
f->atexit = false;
f->disable_core = (SANITIZER_WORDSIZE == 64);
f->strip_path_prefix = "";
f->allow_reexec = true;
f->print_full_thread_history = true;
f->log_path = 0;
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->poison_heap = true;
f->alloc_dealloc_mismatch = true;
f->use_stack_depot = true; // Only affects allocator2.
// Turn off the alloc/dealloc mismatch checker on Mac and Windows for now.
// TODO(glider,timurrrr): Fix known issues and re-enable it.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
f->use_stack_depot = true;
f->strict_memcmp = true;
f->strict_init_order = false;
// Override from compile definition.
ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefiniton());
@ -177,6 +191,20 @@ void InitializeFlags(Flags *f, const char *env) {
// Override from command line.
ParseFlagsFromString(f, env);
#if !CAN_SANITIZE_LEAKS
if (cf->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
cf->detect_leaks = false;
}
#endif
if (cf->detect_leaks && !f->use_stack_depot) {
Report("%s: detect_leaks is ignored (requires use_stack_depot).\n",
SanitizerToolName);
cf->detect_leaks = false;
}
}
// -------------------------- Globals --------------------- {{{1
@ -197,8 +225,8 @@ void ShowStatsAndAbort() {
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
static void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK((beg % GetPageSizeCached()) == 0);
CHECK(((end + 1) % GetPageSizeCached()) == 0);
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
void *res = MmapFixedNoReserve(beg, size);
if (res != (void*)beg) {
@ -281,9 +309,7 @@ static NOINLINE void force_interface_symbols() {
case 25: __asan_poison_memory_region(0, 0); break;
case 26: __asan_unpoison_memory_region(0, 0); break;
case 27: __asan_set_error_exit_code(0); break;
case 28: __asan_stack_free(0, 0, 0); break;
case 29: __asan_stack_malloc(0, 0); break;
case 30: __asan_before_dynamic_init(0, 0); break;
case 30: __asan_before_dynamic_init(0); break;
case 31: __asan_after_dynamic_init(); break;
case 32: __asan_poison_stack_memory(0, 0); break;
case 33: __asan_unpoison_stack_memory(0, 0); break;
@ -304,22 +330,12 @@ static void asan_atexit() {
static void InitializeHighMemEnd() {
#if !ASAN_FIXED_MAPPING
#if SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__)
// FIXME:
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough.
kHighMemEnd = (1ULL << 44) - 1; // 0x00000fffffffffffUL
# else
kHighMemEnd = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif
#else // SANITIZER_WORDSIZE == 32
kHighMemEnd = (1ULL << 32) - 1; // 0xffffffff;
#endif // SANITIZER_WORDSIZE
kHighMemEnd = GetMaxVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
}
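
The final OR rounds the inclusive end address up so that kHighMemEnd + 1 becomes a multiple of SHADOW_GRANULARITY * page size. The trick in miniature, with an assumed granularity of 8 and a 4K page:

#include <cstdio>

typedef unsigned long long u64;

int main() {
  const u64 kShadowGranularity = 8;
  const u64 page_size = 4096;                // Assumed page size.
  u64 high_mem_end = 0x00003fff12345678ULL;  // Stand-in for GetMaxVirtualAddress().
  // Set all low bits so that high_mem_end + 1 is a multiple of
  // SHADOW_GRANULARITY * page_size, as the shadow mapping requires.
  high_mem_end |= kShadowGranularity * page_size - 1;
  printf("0x%llx\n", high_mem_end);  // 0x3fff12347fff
  printf("%llu\n", (high_mem_end + 1) % (kShadowGranularity * page_size));  // 0
}
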
static void ProtectGap(uptr a, uptr size) {
@ -361,7 +377,9 @@ static void PrintAddressSpaceLayout() {
}
Printf("\n");
Printf("red_zone=%zu\n", (uptr)flags()->redzone);
Printf("malloc_context_size=%zu\n", (uptr)flags()->malloc_context_size);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE);
Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY);
@ -380,7 +398,7 @@ using namespace __asan; // NOLINT
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif
@ -393,12 +411,28 @@ int NOINLINE __asan_set_error_exit_code(int exit_code) {
void NOINLINE __asan_handle_no_return() {
int local_stack;
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
AsanThread *curr_thread = GetCurrentThread();
CHECK(curr_thread);
uptr PageSize = GetPageSizeCached();
uptr top = curr_thread->stack_top();
uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
if (top - bottom > kMaxExpectedCleanupSize) {
static bool reported_warning = false;
if (reported_warning)
return;
reported_warning = true;
Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
"stack top: %p; bottom %p; size: %p (%zd)\n"
"False positive error reports may follow\n"
"For details see "
"http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
top, bottom, top - bottom, top - bottom);
return;
}
PoisonShadow(bottom, top - bottom, 0);
if (curr_thread->has_fake_stack())
curr_thread->fake_stack()->HandleNoReturn();
}
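
The bottom computation steps one page below the current frame and then rounds down to a page boundary before the shadow is cleared. In isolation, with an assumed 4K page size:

#include <cstdio>

typedef unsigned long uptr;

int main() {
  const uptr kPageSize = 4096;  // Assumed page size.
  int local_stack;
  uptr sp = (uptr)&local_stack;
  // Step one page below the current frame, then clear the low bits to
  // round down to a page boundary.
  uptr bottom = (sp - kPageSize) & ~(kPageSize - 1);
  printf("sp=%#lx bottom=%#lx\n", sp, bottom);
}
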
void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
@ -424,7 +458,9 @@ void __asan_init() {
// initialization steps look at flags().
const char *options = GetEnv("ASAN_OPTIONS");
InitializeFlags(flags(), options);
__sanitizer_set_report_path(flags()->log_path);
__sanitizer_set_report_path(common_flags()->log_path);
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
if (flags()->verbosity && options) {
Report("Parsed ASAN_OPTIONS: %s\n", options);
@ -447,12 +483,12 @@ void __asan_init() {
ReplaceOperatorsNewAndDelete();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg) shadow_start -= GetMmapGranularity();
uptr shadow_end = kHighShadowEnd;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, shadow_end);
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if ASAN_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
@ -476,7 +512,7 @@ void __asan_init() {
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, shadow_end)) {
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
@ -496,12 +532,16 @@ void __asan_init() {
}
InstallSignalHandlers();
AsanTSDInit(AsanThread::TSDDtor);
// Allocator should be initialized before starting external symbolizer, as
// fork() on Mac locks the allocator.
InitializeAllocator();
// Start symbolizer process if necessary.
if (flags()->symbolize) {
const char *external_symbolizer = GetEnv("ASAN_SYMBOLIZER_PATH");
if (external_symbolizer) {
InitializeExternalSymbolizer(external_symbolizer);
}
if (common_flags()->symbolize && &getSymbolizer) {
getSymbolizer()
->InitializeExternal(common_flags()->external_symbolizer_path);
}
// On Linux AsanThread::ThreadStart() calls malloc(), which is why asan_inited
@ -509,11 +549,24 @@ void __asan_init() {
asan_inited = 1;
asan_init_is_running = false;
asanThreadRegistry().Init();
asanThreadRegistry().GetMain()->ThreadStart();
InitTlsSize();
// Create main thread.
AsanThread *main_thread = AsanThread::Create(0, 0);
CreateThreadContextArgs create_main_args = { main_thread, 0 };
u32 main_tid = asanThreadRegistry().CreateThread(
0, true, 0, &create_main_args);
CHECK_EQ(0, main_tid);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
force_interface_symbols(); // no-op.
InitializeAllocator();
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#endif // CAN_SANITIZE_LEAKS
if (flags()->verbosity) {
Report("AddressSanitizer Init done\n");

View File

@ -12,6 +12,7 @@
#include "asan_internal.h"
#include "asan_flags.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@ -22,8 +23,8 @@ static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
}
void PrintStack(StackTrace *stack) {
stack->PrintStack(stack->trace, stack->size, flags()->symbolize,
flags()->strip_path_prefix, MaybeCallAsanSymbolize);
stack->PrintStack(stack->trace, stack->size, common_flags()->symbolize,
common_flags()->strip_path_prefix, MaybeCallAsanSymbolize);
}
} // namespace __asan
@ -33,8 +34,8 @@ void PrintStack(StackTrace *stack) {
// Provide default implementation of __asan_symbolize that does nothing
// and may be overridden by the user to plug in their own symbolization.
// ASan on Windows has its own implementation of this.
#if !defined(_WIN32) && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
return false;
}

View File

@ -12,12 +12,13 @@
#ifndef ASAN_STACK_H
#define ASAN_STACK_H
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "asan_flags.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
namespace __asan {
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast);
void PrintStack(StackTrace *stack);
} // namespace __asan
@ -25,10 +26,24 @@ void PrintStack(StackTrace *stack);
// Get the stack trace with the given pc and bp.
// The pc will be in position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
// fast_unwind is currently unused.
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp, fast)
#if SANITIZER_WINDOWS
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
GetStackTrace(&stack, max_s, pc, bp, 0, 0, fast)
#else
#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
{ \
AsanThread *t; \
stack.size = 0; \
if (asan_inited && (t = GetCurrentThread()) && !t->isUnwinding()) { \
uptr stack_top = t->stack_top(); \
uptr stack_bottom = t->stack_bottom(); \
ScopedUnwinding unwind_scope(t); \
GetStackTrace(&stack, max_s, pc, bp, stack_top, stack_bottom, fast); \
} \
}
#endif // SANITIZER_WINDOWS
// NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
@ -40,24 +55,24 @@ void PrintStack(StackTrace *stack);
#define GET_STACK_TRACE_FATAL(pc, bp) \
GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
flags()->fast_unwind_on_fatal)
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_THREAD \
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(flags()->malloc_context_size, \
flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
#define PRINT_CURRENT_STACK() \
{ \
GET_STACK_TRACE(kStackTraceMax, \
flags()->fast_unwind_on_fatal); \
common_flags()->fast_unwind_on_fatal); \
PrintStack(&stack); \
}

View File

@ -12,13 +12,18 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
AsanStats::AsanStats() {
CHECK(REAL(memset) != 0);
Clear();
}
void AsanStats::Clear() {
CHECK(REAL(memset));
REAL(memset)(this, 0, sizeof(AsanStats));
}
@ -51,11 +56,73 @@ void AsanStats::Print() {
malloc_large, malloc_small_slow);
}
void AsanStats::MergeFrom(const AsanStats *stats) {
uptr *dst_ptr = reinterpret_cast<uptr*>(this);
const uptr *src_ptr = reinterpret_cast<const uptr*>(stats);
uptr num_fields = sizeof(*this) / sizeof(uptr);
for (uptr i = 0; i < num_fields; i++)
dst_ptr[i] += src_ptr[i];
}
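
This works only because AsanStats consists entirely of uptr counters, so the object can be viewed as an array of uptr and summed field-wise. The same trick in miniature (hypothetical Stats type):

#include <cstdio>

typedef unsigned long uptr;

// A stats struct made solely of uptr fields can be merged field-wise by
// reinterpreting it as an array of uptr, as AsanStats::MergeFrom does.
struct Stats {
  uptr mallocs;
  uptr frees;
  void MergeFrom(const Stats *other) {
    uptr *dst = reinterpret_cast<uptr *>(this);
    const uptr *src = reinterpret_cast<const uptr *>(other);
    for (uptr i = 0; i < sizeof(*this) / sizeof(uptr); i++)
      dst[i] += src[i];
  }
};

int main() {
  Stats a = {10, 4}, b = {5, 1};
  a.MergeFrom(&b);
  printf("%lu %lu\n", a.mallocs, a.frees);  // 15 5
}
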
static BlockingMutex print_lock(LINKER_INITIALIZED);
static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
static AsanStats dead_threads_stats(LINKER_INITIALIZED);
static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
static uptr max_malloced_memory;
static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg);
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
if (AsanThread *t = tctx->thread)
accumulated_stats->MergeFrom(&t->stats());
}
static void GetAccumulatedStats(AsanStats *stats) {
stats->Clear();
{
ThreadRegistryLock l(&asanThreadRegistry());
asanThreadRegistry()
.RunCallbackForEachThreadLocked(MergeThreadStats, stats);
}
stats->MergeFrom(&unknown_thread_stats);
{
BlockingMutexLock lock(&dead_threads_stats_lock);
stats->MergeFrom(&dead_threads_stats);
}
// This is not very accurate: we may miss allocation peaks that happen
// between two calls to GetAccumulatedStats(). For more accurate bookkeeping
// the maximum should be updated on every malloc(), which is unacceptable.
if (max_malloced_memory < stats->malloced) {
max_malloced_memory = stats->malloced;
}
}
void FlushToDeadThreadStats(AsanStats *stats) {
BlockingMutexLock lock(&dead_threads_stats_lock);
dead_threads_stats.MergeFrom(stats);
stats->Clear();
}
void FillMallocStatistics(AsanMallocStats *malloc_stats) {
AsanStats stats;
GetAccumulatedStats(&stats);
malloc_stats->blocks_in_use = stats.mallocs;
malloc_stats->size_in_use = stats.malloced;
malloc_stats->max_size_in_use = max_malloced_memory;
malloc_stats->size_allocated = stats.mmaped;
}
AsanStats &GetCurrentThreadStats() {
AsanThread *t = GetCurrentThread();
return (t) ? t->stats() : unknown_thread_stats;
}
static void PrintAccumulatedStats() {
AsanStats stats;
asanThreadRegistry().GetAccumulatedStats(&stats);
GetAccumulatedStats(&stats);
// Use a lock to keep reports from interleaving.
BlockingMutexLock lock(&print_lock);
stats.Print();
@ -71,15 +138,33 @@ static void PrintAccumulatedStats() {
using namespace __asan; // NOLINT
uptr __asan_get_current_allocated_bytes() {
return asanThreadRegistry().GetCurrentAllocatedBytes();
AsanStats stats;
GetAccumulatedStats(&stats);
uptr malloced = stats.malloced;
uptr freed = stats.freed;
// Return a sane value if malloced < freed due to the racy
// way we update accumulated stats.
return (malloced > freed) ? malloced - freed : 1;
}
uptr __asan_get_heap_size() {
return asanThreadRegistry().GetHeapSize();
AsanStats stats;
GetAccumulatedStats(&stats);
return stats.mmaped - stats.munmaped;
}
uptr __asan_get_free_bytes() {
return asanThreadRegistry().GetFreeBytes();
AsanStats stats;
GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped
- stats.munmaped
+ stats.really_freed
+ stats.really_freed_redzones;
uptr total_used = stats.malloced
+ stats.malloced_redzones;
// Return a sane value if total_free < total_used due to the racy
// way we update accumulated stats.
return (total_free > total_used) ? total_free - total_used : 1;
}
uptr __asan_get_unmapped_bytes() {

View File

@ -50,10 +50,17 @@ struct AsanStats {
// Default ctor for thread-local stats.
AsanStats();
// Prints formatted stats to stderr.
void Print();
void Print(); // Prints formatted stats to stderr.
void Clear();
void MergeFrom(const AsanStats *stats);
};
// Returns stats for GetCurrentThread(), or stats for fake "unknown thread"
// if GetCurrentThread() returns 0.
AsanStats &GetCurrentThreadStats();
// Flushes a given stats into accumulated stats of dead threads.
void FlushToDeadThreadStats(AsanStats *stats);
// A cross-platform equivalent of malloc_statistics_t on Mac OS.
struct AsanMallocStats {
uptr blocks_in_use;
@ -62,6 +69,8 @@ struct AsanMallocStats {
uptr size_allocated;
};
void FillMallocStatistics(AsanMallocStats *malloc_stats);
} // namespace __asan
#endif // ASAN_STATS_H

View File

@ -11,46 +11,82 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "lsan/lsan_common.h"
namespace __asan {
AsanThread::AsanThread(LinkerInitialized x)
: fake_stack_(x),
malloc_storage_(x),
stats_(x) { }
// AsanThreadContext implementation.
AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine,
void *arg, StackTrace *stack) {
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack) {
internal_memcpy(&stack, args->stack, sizeof(stack));
}
thread = args->thread;
thread->set_context(this);
}
void AsanThreadContext::OnFinished() {
// Drop the link to the AsanThread object.
thread = 0;
}
// MIPS requires an aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(AsanThreadContext), "AsanThreadContext");
return new(mem) AsanThreadContext(tid);
}
ThreadRegistry &asanThreadRegistry() {
static bool initialized;
// Don't worry about thread safety - this should be called when there is
// still a single thread.
if (!initialized) {
// Never reuse ASan threads: we store a pointer to the AsanThreadContext
// in TSD and can't reliably tell when no more TSD destructors will
// be called. It would be wrong to reuse an AsanThreadContext for another
// thread before all TSD destructors have been called for it.
asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
initialized = true;
}
return *asan_thread_registry;
}
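
The registry uses a common runtime idiom: the singleton is constructed with placement new inside a static, explicitly aligned buffer, avoiding both heap allocation and a static constructor. A stripped-down sketch of the idiom (illustrative types; alignas stands in for the ALIGNED macro):

#include <new>  // placement new
#include <cstdio>

struct Registry {
  int n_threads;
  Registry() : n_threads(0) {}
};

// Static storage, explicitly aligned for the object it will host
// (MIPS and some other targets need this alignment to be explicit).
alignas(16) static char registry_placeholder[sizeof(Registry)];
static Registry *registry;

Registry &theRegistry() {
  static bool initialized = false;
  // Assumed to be first called while the process is still single-threaded.
  if (!initialized) {
    registry = new (registry_placeholder) Registry();
    initialized = true;
  }
  return *registry;
}

int main() {
  theRegistry().n_threads++;
  printf("%d\n", theRegistry().n_threads);  // 1
}
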
AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
return static_cast<AsanThreadContext *>(
asanThreadRegistry().GetThreadLocked(tid));
}
// AsanThread implementation.
AsanThread *AsanThread::Create(thread_callback_t start_routine,
void *arg) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
const uptr kSummaryAllocSize = PageSize;
CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize);
AsanThreadSummary *summary =
(AsanThreadSummary*)MmapOrDie(PageSize, "AsanThreadSummary");
summary->Init(parent_tid, stack);
summary->set_thread(thread);
thread->set_summary(summary);
thread->context_ = 0;
return thread;
}
void AsanThreadSummary::TSDDtor(void *tsd) {
AsanThreadSummary *summary = (AsanThreadSummary*)tsd;
if (flags()->verbosity >= 1) {
Report("T%d TSDDtor\n", summary->tid());
}
if (summary->thread()) {
summary->thread()->Destroy();
}
void AsanThread::TSDDtor(void *tsd) {
AsanThreadContext *context = (AsanThreadContext*)tsd;
if (flags()->verbosity >= 1)
Report("T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
}
void AsanThread::Destroy() {
@ -58,41 +94,68 @@ void AsanThread::Destroy() {
Report("T%d exited\n", tid());
}
asanThreadRegistry().UnregisterThread(this);
CHECK(summary()->thread() == 0);
asanThreadRegistry().FinishThread(tid());
FlushToDeadThreadStats(&stats_);
// We also clear the shadow on thread destruction because
// some code may still be executing in later TSD destructors
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStack();
fake_stack().Cleanup();
ClearShadowForThreadStackAndTLS();
DeleteFakeStack();
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
}
// We want to create the FakeStack lazily on first use, but not earlier
// than the stack size is known; the procedure also has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
uptr stack_size = this->stack_size();
if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
return 0;
uptr old_val = 0;
// fake_stack_ has 3 states:
// 0 -- not initialized
// 1 -- being initialized
// ptr -- initialized
// This CAS checks if the state was 0 and if so changes it to state 1,
// if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
if (flags()->uar_stack_size_log)
stack_size_log = static_cast<uptr>(flags()->uar_stack_size_log);
fake_stack_ = FakeStack::Create(stack_size_log);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
}
return 0;
}
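
The same three-state compare-and-swap pattern, reduced to a standalone sketch with std::atomic; the runtime uses its own atomics and a per-thread field, so this is only an analogy:

#include <atomic>
#include <cstdint>
#include <cstdio>

struct FakeStack { /* ... */ };

// 0 = not initialized, 1 = being initialized, anything else = pointer.
static std::atomic<uintptr_t> fake_stack_state{0};

FakeStack *LazyInit() {
  uintptr_t expected = 0;
  // Only the thread that wins the 0 -> 1 transition constructs the object.
  if (fake_stack_state.compare_exchange_strong(expected, 1,
                                               std::memory_order_relaxed)) {
    FakeStack *fs = new FakeStack();
    fake_stack_state.store(reinterpret_cast<uintptr_t>(fs),
                           std::memory_order_release);
    return fs;
  }
  uintptr_t v = fake_stack_state.load(std::memory_order_acquire);
  return v > 1 ? reinterpret_cast<FakeStack *>(v) : 0;  // 1 => still initializing
}

int main() {
  printf("%p\n", (void *)LazyInit());
}
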
void AsanThread::Init() {
SetThreadStackTopAndBottom();
SetThreadStackAndTls();
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStack();
ClearShadowForThreadStackAndTLS();
if (flags()->verbosity >= 1) {
int local = 0;
Report("T%d: stack [%p,%p) size 0x%zx; local=%p\n",
tid(), (void*)stack_bottom_, (void*)stack_top_,
stack_top_ - stack_bottom_, &local);
}
fake_stack_.Init(stack_size());
fake_stack_ = 0; // Will be initialized lazily if needed.
AsanPlatformThreadInit();
}
thread_return_t AsanThread::ThreadStart() {
thread_return_t AsanThread::ThreadStart(uptr os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
if (flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
// OS X libdispatch worker threads. But nobody is supposed to call
// ThreadStart() for the worker threads.
CHECK(tid() == 0);
CHECK_EQ(tid(), 0);
return 0;
}
@ -105,24 +168,33 @@ thread_return_t AsanThread::ThreadStart() {
return res;
}
void AsanThread::SetThreadStackTopAndBottom() {
GetThreadStackTopAndBottom(tid() == 0, &stack_top_, &stack_bottom_);
void AsanThread::SetThreadStackAndTls() {
uptr tls_size = 0;
GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_,
&tls_size);
stack_top_ = stack_bottom_ + stack_size_;
tls_end_ = tls_begin_ + tls_size;
int local;
CHECK(AddrIsInStack((uptr)&local));
}
void AsanThread::ClearShadowForThreadStack() {
void AsanThread::ClearShadowForThreadStackAndTLS() {
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_)
PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}
const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset) {
const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset,
uptr *frame_pc) {
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
} else {
bottom = fake_stack().AddrIsInFakeStack(addr);
} else if (has_fake_stack()) {
bottom = fake_stack()->AddrIsInFakeStack(addr);
CHECK(bottom);
*offset = addr - bottom;
*frame_pc = ((uptr*)bottom)[2];
return (const char *)((uptr*)bottom)[1];
}
uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
@ -147,7 +219,104 @@ const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset) {
uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
CHECK(ptr[0] == kCurrentStackFrameMagic);
*offset = addr - (uptr)ptr;
*frame_pc = ptr[2];
return (const char*)ptr[1];
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
void *addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
AsanThread *t = tctx->thread;
if (!t) return false;
if (t->AddrIsInStack((uptr)addr)) return true;
if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
return true;
return false;
}
AsanThread *GetCurrentThread() {
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (!context) {
if (SANITIZER_ANDROID) {
// On Android, the libc constructor is called _after_ asan_init, and cleans up
// TSD. Try to figure out if this is still the main thread by the stack
// address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
if (ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
}
return 0;
}
return context->thread;
}
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
if (flags()->verbosity >= 2) {
Report("SetCurrentThread: %p for thread %p\n",
t->context(), (void*)GetThreadSelf());
}
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
AsanTSDSet(t->context());
CHECK_EQ(t->context(), AsanTSDGet());
}
u32 GetCurrentTidOrInvalid() {
AsanThread *t = GetCurrentThread();
return t ? t->tid() : kInvalidTid;
}
AsanThread *FindThreadByStackAddress(uptr addr) {
asanThreadRegistry().CheckLocked();
AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
(void *)addr));
return tctx ? tctx->thread : 0;
}
void EnsureMainThreadIDIsCorrect() {
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (context && (context->tid == 0))
context->os_id = GetTid();
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
__asan::AsanThread *t = context->thread;
if (!t) return false;
*stack_begin = t->stack_bottom();
*stack_end = t->stack_top();
*tls_begin = t->tls_begin();
*tls_end = t->tls_end();
// ASan doesn't keep allocator caches in TLS, so these are unused.
*cache_begin = 0;
*cache_end = 0;
return true;
}
void LockThreadRegistry() {
__asan::asanThreadRegistry().Lock();
}
void UnlockThreadRegistry() {
__asan::asanThreadRegistry().Unlock();
}
void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan

View File

@ -14,99 +14,148 @@
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_fake_stack.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __asan {
const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
const u32 kMaxNumberOfThreads = (1 << 22); // 4M
class AsanThread;
// These objects are created for every thread and are never deleted,
// so we can find them by tid even if the thread is long dead.
class AsanThreadSummary {
class AsanThreadContext : public ThreadContextBase {
public:
explicit AsanThreadSummary(LinkerInitialized) { } // for T0.
void Init(u32 parent_tid, StackTrace *stack) {
parent_tid_ = parent_tid;
announced_ = false;
tid_ = kInvalidTid;
if (stack) {
internal_memcpy(&stack_, stack, sizeof(*stack));
}
thread_ = 0;
name_[0] = 0;
explicit AsanThreadContext(int tid)
: ThreadContextBase(tid),
announced(false),
thread(0) {
internal_memset(&stack, 0, sizeof(stack));
}
u32 tid() { return tid_; }
void set_tid(u32 tid) { tid_ = tid; }
u32 parent_tid() { return parent_tid_; }
bool announced() { return announced_; }
void set_announced(bool announced) { announced_ = announced; }
StackTrace *stack() { return &stack_; }
AsanThread *thread() { return thread_; }
void set_thread(AsanThread *thread) { thread_ = thread; }
static void TSDDtor(void *tsd);
void set_name(const char *name) {
internal_strncpy(name_, name, sizeof(name_) - 1);
}
const char *name() { return name_; }
bool announced;
StackTrace stack;
AsanThread *thread;
private:
u32 tid_;
u32 parent_tid_;
bool announced_;
StackTrace stack_;
AsanThread *thread_;
char name_[128];
void OnCreated(void *arg);
void OnFinished();
};
// AsanThreadSummary objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadSummary) <= 4094);
// AsanThreadContext objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadContext) <= 4096);
// AsanThread objects are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
explicit AsanThread(LinkerInitialized); // for T0.
static AsanThread *Create(u32 parent_tid, thread_callback_t start_routine,
void *arg, StackTrace *stack);
static AsanThread *Create(thread_callback_t start_routine, void *arg);
static void TSDDtor(void *tsd);
void Destroy();
void Init(); // Should be called from the thread itself.
thread_return_t ThreadStart();
thread_return_t ThreadStart(uptr os_id);
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
uptr stack_size() { return stack_top_ - stack_bottom_; }
u32 tid() { return summary_->tid(); }
AsanThreadSummary *summary() { return summary_; }
void set_summary(AsanThreadSummary *summary) { summary_ = summary; }
uptr stack_size() { return stack_size_; }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
u32 tid() { return context_->tid; }
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }
const char *GetFrameNameByAddr(uptr addr, uptr *offset);
const char *GetFrameNameByAddr(uptr addr, uptr *offset, uptr *frame_pc);
bool AddrIsInStack(uptr addr) {
return addr >= stack_bottom_ && addr < stack_top_;
}
FakeStack &fake_stack() { return fake_stack_; }
void DeleteFakeStack() {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
t->Destroy();
}
bool has_fake_stack() {
return (reinterpret_cast<uptr>(fake_stack_) > 1);
}
FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
return 0;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;
}
// True if this thread is currently unwinding its stack (i.e. collecting a
// stack trace). Used to prevent deadlocks on platforms where the libc
// unwinder calls malloc internally. See PR17116 for more details.
bool isUnwinding() const { return unwinding; }
void setUnwinding(bool b) { unwinding = b; }
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
private:
void SetThreadStackTopAndBottom();
void ClearShadowForThreadStack();
AsanThreadSummary *summary_;
AsanThread() : unwinding(false) {}
void SetThreadStackAndTls();
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
AsanThreadContext *context_;
thread_callback_t start_routine_;
void *arg_;
uptr stack_top_;
uptr stack_bottom_;
// stack_size_ == stack_top_ - stack_bottom_;
// It needs to be set in an async-signal-safe manner.
uptr stack_size_;
uptr tls_begin_;
uptr tls_end_;
FakeStack fake_stack_;
FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding;
};
// ScopedUnwinding is a RAII scope that marks a thread as unwinding its stack.
class ScopedUnwinding {
public:
explicit ScopedUnwinding(AsanThread *t) : thread(t) {
t->setUnwinding(true);
}
~ScopedUnwinding() { thread->setUnwinding(false); }
private:
AsanThread *thread;
};
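
The guard generalizes to any "do not re-enter this operation" flag. A minimal standalone equivalent (hypothetical Thread type):

#include <cstdio>

struct Thread {
  bool unwinding = false;
};

// RAII guard: set the flag for the dynamic extent of one unwind, so code
// reached from inside the unwinder (e.g. an instrumented malloc) can see
// that collecting another stack trace now would recurse or deadlock.
class Guard {
 public:
  explicit Guard(Thread *t) : thread_(t) { t->unwinding = true; }
  ~Guard() { thread_->unwinding = false; }
 private:
  Thread *thread_;
};

void CollectStackTrace(Thread *t) {
  if (t->unwinding) return;  // Already unwinding: bail out instead of recursing.
  Guard guard(t);
  // ... walk the stack; anything called from here sees t->unwinding == true.
}

int main() {
  Thread t;
  CollectStackTrace(&t);
  printf("done, unwinding=%d\n", t.unwinding);  // done, unwinding=0
}
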
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
// Must be called under ThreadRegistryLock.
AsanThreadContext *GetThreadContextByTidLocked(u32 tid);
// Get the current thread. May return 0.
AsanThread *GetCurrentThread();
void SetCurrentThread(AsanThread *t);
u32 GetCurrentTidOrInvalid();
AsanThread *FindThreadByStackAddress(uptr addr);
// Used to handle fork().
void EnsureMainThreadIDIsCorrect();
} // namespace __asan
#endif // ASAN_THREAD_H

View File

@ -1,196 +0,0 @@
//===-- asan_thread_registry.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// AsanThreadRegistry-related code. AsanThreadRegistry is a container
// for summaries of all created threads.
//===----------------------------------------------------------------------===//
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
static AsanThreadRegistry asan_thread_registry(LINKER_INITIALIZED);
AsanThreadRegistry &asanThreadRegistry() {
return asan_thread_registry;
}
AsanThreadRegistry::AsanThreadRegistry(LinkerInitialized x)
: main_thread_(x),
main_thread_summary_(x),
accumulated_stats_(x),
max_malloced_memory_(x),
mu_(x) { }
void AsanThreadRegistry::Init() {
AsanTSDInit(AsanThreadSummary::TSDDtor);
main_thread_.set_summary(&main_thread_summary_);
main_thread_summary_.set_thread(&main_thread_);
RegisterThread(&main_thread_);
SetCurrent(&main_thread_);
// At this point only one thread exists.
inited_ = true;
}
void AsanThreadRegistry::RegisterThread(AsanThread *thread) {
BlockingMutexLock lock(&mu_);
u32 tid = n_threads_;
n_threads_++;
CHECK(n_threads_ < kMaxNumberOfThreads);
AsanThreadSummary *summary = thread->summary();
CHECK(summary != 0);
summary->set_tid(tid);
thread_summaries_[tid] = summary;
}
void AsanThreadRegistry::UnregisterThread(AsanThread *thread) {
BlockingMutexLock lock(&mu_);
FlushToAccumulatedStatsUnlocked(&thread->stats());
AsanThreadSummary *summary = thread->summary();
CHECK(summary);
summary->set_thread(0);
}
AsanThread *AsanThreadRegistry::GetMain() {
return &main_thread_;
}
AsanThread *AsanThreadRegistry::GetCurrent() {
AsanThreadSummary *summary = (AsanThreadSummary *)AsanTSDGet();
if (!summary) {
#if ASAN_ANDROID
// On Android, libc constructor is called _after_ asan_init, and cleans up
// TSD. Try to figure out if this is still the main thread by the stack
// address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread is
// the main thread.
AsanThread* thread = FindThreadByStackAddress((uptr)&summary);
if (thread && thread->tid() == 0) {
SetCurrent(thread);
return thread;
}
#endif
return 0;
}
return summary->thread();
}
void AsanThreadRegistry::SetCurrent(AsanThread *t) {
CHECK(t->summary());
if (flags()->verbosity >= 2) {
Report("SetCurrent: %p for thread %p\n",
t->summary(), (void*)GetThreadSelf());
}
// Make sure we do not reset the current AsanThread.
CHECK(AsanTSDGet() == 0);
AsanTSDSet(t->summary());
CHECK(AsanTSDGet() == t->summary());
}
AsanStats &AsanThreadRegistry::GetCurrentThreadStats() {
AsanThread *t = GetCurrent();
return (t) ? t->stats() : main_thread_.stats();
}
void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_));
}
uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr malloced = accumulated_stats_.malloced;
uptr freed = accumulated_stats_.freed;
// Return sane value if malloced < freed due to racy
// way we update accumulated stats.
return (malloced > freed) ? malloced - freed : 1;
}
uptr AsanThreadRegistry::GetHeapSize() {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
}
uptr AsanThreadRegistry::GetFreeBytes() {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
uptr total_free = accumulated_stats_.mmaped
- accumulated_stats_.munmaped
+ accumulated_stats_.really_freed
+ accumulated_stats_.really_freed_redzones;
uptr total_used = accumulated_stats_.malloced
+ accumulated_stats_.malloced_redzones;
// Return sane value if total_free < total_used due to racy
// way we update accumulated stats.
return (total_free > total_used) ? total_free - total_used : 1;
}
// Return several stats counters with a single call to
// UpdateAccumulatedStatsUnlocked().
void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) {
BlockingMutexLock lock(&mu_);
UpdateAccumulatedStatsUnlocked();
malloc_stats->blocks_in_use = accumulated_stats_.mallocs;
malloc_stats->size_in_use = accumulated_stats_.malloced;
malloc_stats->max_size_in_use = max_malloced_memory_;
malloc_stats->size_allocated = accumulated_stats_.mmaped;
}
AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) {
CHECK(tid < n_threads_);
CHECK(thread_summaries_[tid]);
return thread_summaries_[tid];
}
AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) {
BlockingMutexLock lock(&mu_);
for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
if (!t || !(t->fake_stack().StackSize())) continue;
if (t->fake_stack().AddrIsInFakeStack(addr) || t->AddrIsInStack(addr)) {
return t;
}
}
return 0;
}
void AsanThreadRegistry::UpdateAccumulatedStatsUnlocked() {
for (u32 tid = 0; tid < n_threads_; tid++) {
AsanThread *t = thread_summaries_[tid]->thread();
if (t != 0) {
FlushToAccumulatedStatsUnlocked(&t->stats());
}
}
// This is not very accurate: we may miss allocation peaks that happen
// between two updates of accumulated_stats_. For more accurate bookkeeping
// the maximum should be updated on every malloc(), which is unacceptable.
if (max_malloced_memory_ < accumulated_stats_.malloced) {
max_malloced_memory_ = accumulated_stats_.malloced;
}
}
void AsanThreadRegistry::FlushToAccumulatedStatsUnlocked(AsanStats *stats) {
// AsanStats consists of variables of type uptr only.
uptr *dst = (uptr*)&accumulated_stats_;
uptr *src = (uptr*)stats;
uptr num_fields = sizeof(AsanStats) / sizeof(uptr);
for (uptr i = 0; i < num_fields; i++) {
dst[i] += src[i];
src[i] = 0;
}
}
} // namespace __asan
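
FlushToAccumulatedStatsUnlocked above leans on AsanStats being laid out as a plain array of word-sized counters. A minimal self-contained sketch of the same pattern, with a stand-in Stats type (not the real AsanStats) and a compile-time guard on the layout assumption, assuming a C++11 compiler:

#include <cstddef>

struct Stats {  // stand-in for AsanStats: word-sized counters only
  unsigned long mallocs, malloced, frees, freed;
};

static_assert(sizeof(Stats) % sizeof(unsigned long) == 0,
              "Stats must flatten to an array of word-sized counters");

// Add every counter of *src into *dst, then zero *src, exactly as the
// registry does when folding per-thread stats into the accumulated ones.
inline void FlushStats(Stats *dst_s, Stats *src_s) {
  unsigned long *dst = reinterpret_cast<unsigned long *>(dst_s);
  unsigned long *src = reinterpret_cast<unsigned long *>(src_s);
  for (size_t i = 0; i < sizeof(Stats) / sizeof(unsigned long); i++) {
    dst[i] += src[i];
    src[i] = 0;
  }
}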

View File

@ -1,83 +0,0 @@
//===-- asan_thread_registry.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_thread_registry.cc
//===----------------------------------------------------------------------===//
#ifndef ASAN_THREAD_REGISTRY_H
#define ASAN_THREAD_REGISTRY_H
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_mutex.h"
namespace __asan {
// Stores summaries of all created threads, returns current thread,
// thread by tid, thread by stack address. There is a single instance
// of AsanThreadRegistry for the whole program.
// AsanThreadRegistry is thread-safe.
class AsanThreadRegistry {
public:
explicit AsanThreadRegistry(LinkerInitialized);
void Init();
void RegisterThread(AsanThread *thread);
void UnregisterThread(AsanThread *thread);
AsanThread *GetMain();
// Get the current thread. May return 0.
AsanThread *GetCurrent();
void SetCurrent(AsanThread *t);
u32 GetCurrentTidOrInvalid() {
if (!inited_) return 0;
AsanThread *t = GetCurrent();
return t ? t->tid() : kInvalidTid;
}
// Returns stats for GetCurrent(), or stats for
// T0 if GetCurrent() returns 0.
AsanStats &GetCurrentThreadStats();
// Flushes all thread-local stats to accumulated stats, and makes
// a copy of accumulated stats.
void GetAccumulatedStats(AsanStats *stats);
uptr GetCurrentAllocatedBytes();
uptr GetHeapSize();
uptr GetFreeBytes();
void FillMallocStatistics(AsanMallocStats *malloc_stats);
AsanThreadSummary *FindByTid(u32 tid);
AsanThread *FindThreadByStackAddress(uptr addr);
private:
void UpdateAccumulatedStatsUnlocked();
// Adds values of all counters in "stats" to accumulated stats,
// and fills "stats" with zeroes.
void FlushToAccumulatedStatsUnlocked(AsanStats *stats);
static const u32 kMaxNumberOfThreads = (1 << 22); // 4M
AsanThreadSummary *thread_summaries_[kMaxNumberOfThreads];
AsanThread main_thread_;
AsanThreadSummary main_thread_summary_;
AsanStats accumulated_stats_;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
uptr max_malloced_memory_;
u32 n_threads_;
BlockingMutex mu_;
bool inited_;
};
// Returns a single instance of registry.
AsanThreadRegistry &asanThreadRegistry();
} // namespace __asan
#endif // ASAN_THREAD_REGISTRY_H

View File

@ -9,7 +9,9 @@
//
// Windows-specific details.
//===----------------------------------------------------------------------===//
#ifdef _WIN32
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include <windows.h>
#include <dbghelp.h>
@ -21,6 +23,14 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_should_detect_stack_use_after_return() {
__asan_init();
return __asan_option_detect_stack_use_after_return;
}
}
namespace __asan {
// ---------------------- Stacktraces, symbols, etc. ---------------- {{{1
@ -28,30 +38,6 @@ static BlockingMutex dbghelp_lock(LINKER_INITIALIZED);
static bool dbghelp_initialized = false;
#pragma comment(lib, "dbghelp.lib")
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
(void)fast;
stack->max_size = max_s;
void *tmp[kStackTraceMax];
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0);
uptr offset = 0;
// Skip the RTL frames by searching for the PC in the stacktrace.
// FIXME: this doesn't work well for the malloc/free stacks yet.
for (uptr i = 0; i < cs_ret; i++) {
if (pc != (uptr)tmp[i])
continue;
offset = i;
break;
}
stack->size = cs_ret - offset;
for (uptr i = 0; i < stack->size; i++)
stack->trace[i] = (uptr)tmp[i + offset];
}
// ---------------------- TSD ---------------- {{{1
static bool tsd_key_inited = false;

View File

@ -1,6 +1,6 @@
# This file is used to maintain libtool version info for libmudflap. See
# This file is used to maintain libtool version info for libasan. See
# the libtool manual to understand the meaning of the fields. This is
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
0:0:0
1:0:0

View File

@ -14549,7 +14549,7 @@ fi
ac_config_files="$ac_config_files Makefile"
ac_config_files="$ac_config_files interception/Makefile sanitizer_common/Makefile asan/Makefile ubsan/Makefile"
ac_config_files="$ac_config_files interception/Makefile sanitizer_common/Makefile lsan/Makefile asan/Makefile ubsan/Makefile"
if test "x$TSAN_SUPPORTED" = "xyes"; then
@ -15679,6 +15679,7 @@ do
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"interception/Makefile") CONFIG_FILES="$CONFIG_FILES interception/Makefile" ;;
"sanitizer_common/Makefile") CONFIG_FILES="$CONFIG_FILES sanitizer_common/Makefile" ;;
"lsan/Makefile") CONFIG_FILES="$CONFIG_FILES lsan/Makefile" ;;
"asan/Makefile") CONFIG_FILES="$CONFIG_FILES asan/Makefile" ;;
"ubsan/Makefile") CONFIG_FILES="$CONFIG_FILES ubsan/Makefile" ;;
"tsan/Makefile") CONFIG_FILES="$CONFIG_FILES tsan/Makefile" ;;
@ -17026,6 +17027,17 @@ _EOF
;;
"sanitizer_common/Makefile":F) cat > vpsed$$ << \_EOF
s!`test -f '$<' || echo '$(srcdir)/'`!!
_EOF
sed -f vpsed$$ $ac_file > tmp$$
mv tmp$$ $ac_file
rm vpsed$$
echo 'MULTISUBDIR =' >> $ac_file
ml_norecursion=yes
. ${multi_basedir}/config-ml.in
{ ml_norecursion=; unset ml_norecursion;}
;;
"lsan/Makefile":F) cat > vpsed$$ << \_EOF
s!`test -f '$<' || echo '$(srcdir)/'`!!
_EOF
sed -f vpsed$$ $ac_file > tmp$$
mv tmp$$ $ac_file

View File

@ -89,7 +89,7 @@ AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE)
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES(AC_FOREACH([DIR], [interception sanitizer_common asan ubsan], [DIR/Makefile ]),
AC_CONFIG_FILES(AC_FOREACH([DIR], [interception sanitizer_common lsan asan ubsan], [DIR/Makefile ]),
[cat > vpsed$$ << \_EOF
s!`test -f '$<' || echo '$(srcdir)/'`!!
_EOF

View File

@ -39,6 +39,16 @@ extern "C" {
// the error message. This function can be overridden by the client.
void __sanitizer_report_error_summary(const char *error_summary);
// Some of the sanitizers (e.g. asan/tsan) may miss bugs that happen
// in unaligned loads/stores. In order to find such bugs reliably one needs
// to replace plain unaligned loads/stores with these calls.
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t x);
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
#ifdef __cplusplus
} // extern "C"
#endif
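
A short usage sketch for the unaligned-access callouts above (assuming the header is on the include path): route a 32-bit access to a possibly misaligned buffer through the sanitizer so it is checked reliably.

#include <stdint.h>
#include <sanitizer/common_interface_defs.h>

static uint32_t read_u32_unaligned(const unsigned char *p) {
  // Instead of a plain *(const uint32_t *)p, which asan/tsan may miss:
  return __sanitizer_unaligned_load32(p);
}

static void write_u32_unaligned(unsigned char *p, uint32_t x) {
  __sanitizer_unaligned_store32(p, x);
}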

View File

@ -0,0 +1,85 @@
//===-- dfsan_interface.h -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of DataFlowSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef DFSAN_INTERFACE_H
#define DFSAN_INTERFACE_H
#include <stddef.h>
#include <stdint.h>
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef uint16_t dfsan_label;
/// Stores information associated with a specific label identifier. A label
/// may be a base label created using dfsan_create_label, with associated
/// text description and user data, or an automatically created union label,
/// which represents the union of two label identifiers (which may themselves
/// be base or union labels).
struct dfsan_label_info {
// Fields for union labels, set to 0 for base labels.
dfsan_label l1;
dfsan_label l2;
// Fields for base labels.
const char *desc;
void *userdata;
};
/// Computes the union of \c l1 and \c l2, possibly creating a union label in
/// the process.
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
/// Creates and returns a base label with the given description and user data.
dfsan_label dfsan_create_label(const char *desc, void *userdata);
/// Sets the label for each address in [addr,addr+size) to \c label.
void dfsan_set_label(dfsan_label label, void *addr, size_t size);
/// Sets the label for each address in [addr,addr+size) to the union of the
/// current label for that address and \c label.
void dfsan_add_label(dfsan_label label, void *addr, size_t size);
/// Retrieves the label associated with the given data.
///
/// The type of 'data' is arbitrary. The function accepts a value of any type,
/// which can be truncated or extended (implicitly or explicitly) as necessary.
/// The truncation/extension operations will preserve the label of the original
/// value.
dfsan_label dfsan_get_label(long data);
/// Retrieves the label associated with the data at the given address.
dfsan_label dfsan_read_label(const void *addr, size_t size);
/// Retrieves a pointer to the dfsan_label_info struct for the given label.
const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
/// Returns whether the given label \c label contains the label \c elem.
int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// If the given label \c label contains a label with the description
/// \c desc, returns that label; otherwise returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
#ifdef __cplusplus
} // extern "C"
template <typename T>
void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
dfsan_set_label(label, (void *)&data, sizeof(T));
}
#endif
#endif // DFSAN_INTERFACE_H
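
A minimal usage sketch of the label API above, assuming a program compiled with -fsanitize=dataflow: create a base label, attach it to an input, and check that it propagates into a derived value.

#include <assert.h>
#include <sanitizer/dfsan_interface.h>

int main() {
  int input = 42;
  dfsan_label lbl = dfsan_create_label("input", /*userdata=*/0);
  dfsan_set_label(lbl, &input, sizeof(input));
  int derived = input * 2;  // the label flows through the arithmetic
  assert(dfsan_has_label(dfsan_get_label(derived), lbl));
  return 0;
}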

File diff suppressed because it is too large

View File

@ -0,0 +1,50 @@
//===-- sanitizer/lsan_interface.h ------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LSAN_INTERFACE_H
#define SANITIZER_LSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// Allocations made between calls to __lsan_disable() and __lsan_enable() will
// be treated as non-leaks. Disable/enable pairs may be nested.
void __lsan_disable();
void __lsan_enable();
// The heap object into which p points will be treated as a non-leak.
void __lsan_ignore_object(const void *p);
// The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
int __lsan_is_turned_off();
// Calling this function makes LSan enter the leak checking phase immediately.
// Use this if normal end-of-process leak checking happens too late (e.g. if
// you have intentional memory leaks in your shutdown code). Calling this
// function overrides end-of-process leak checking; it must be called at
// most once per process. This function will terminate the process if there
// are memory leaks and the exit_code flag is non-zero.
void __lsan_do_leak_check();
#ifdef __cplusplus
} // extern "C"
namespace __lsan {
class ScopedDisabler {
public:
ScopedDisabler() { __lsan_disable(); }
~ScopedDisabler() { __lsan_enable(); }
};
} // namespace __lsan
#endif
#endif // SANITIZER_LSAN_INTERFACE_H
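
A short usage sketch for the interface above (assuming the program runs under LSan): the object passed to __lsan_ignore_object and everything allocated inside the ScopedDisabler scope are treated as non-leaks at the end-of-process check.

#include <stdlib.h>
#include <sanitizer/lsan_interface.h>

int main() {
  void *kept = malloc(32);       // intentionally never freed
  __lsan_ignore_object(kept);    // suppress just this object
  {
    __lsan::ScopedDisabler disabler;  // RAII __lsan_disable()/__lsan_enable()
    malloc(64);                       // also treated as a non-leak
  }
  return 0;
}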

View File

@ -0,0 +1,160 @@
//===-- msan_interface.h --------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef MSAN_INTERFACE_H
#define MSAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
#if __has_feature(memory_sanitizer)
/* Returns a string describing a stack origin.
Return NULL if the origin is invalid, or is not a stack origin. */
const char *__msan_get_origin_descr_if_stack(uint32_t id);
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
/* Get raw origin for an address. */
uint32_t __msan_get_origin(const volatile void *a);
/* Returns non-zero if tracking origins. */
int __msan_get_track_origins();
/* Returns the origin id of the latest UMR in the calling thread. */
uint32_t __msan_get_umr_origin();
/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);
/* Make memory region fully uninitialized (without changing its contents). */
void __msan_poison(const volatile void *a, size_t size);
/* Make memory region partially uninitialized (without changing its contents).
*/
void __msan_partial_poison(const volatile void *data, void *shadow,
size_t size);
/* Returns the offset of the first (at least partially) poisoned byte in the
memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
/* For testing:
__msan_set_expect_umr(1);
... some buggy code ...
__msan_set_expect_umr(0);
The last line will verify that a UMR happened. */
void __msan_set_expect_umr(int expect_umr);
/* Change the value of keep_going flag. Non-zero value means don't terminate
program execution when an error is detected. This will not affect error in
modules that were compiled without the corresponding compiler flag. */
void __msan_set_keep_going(int keep_going);
/* Print shadow and origin for the memory range to stdout in a human-readable
format. */
void __msan_print_shadow(const volatile void *x, size_t size);
/* Print current function arguments shadow and origin to stdout in a
human-readable format. */
void __msan_print_param_shadow();
/* Returns true if running under a dynamic tool (DynamoRio-based). */
int __msan_has_dynamic_component();
/* Tell MSan about newly allocated memory (ex.: custom allocator).
Memory will be marked uninitialized, with origin at the call site. */
void __msan_allocated_memory(const volatile void* data, size_t size);
/* This function may be optionally provided by user and should return
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
/***********************************/
/* Allocator statistics interface. */
/* Returns the estimated number of bytes that will be reserved by allocator
for request of "size" bytes. If Msan allocator can't allocate that much
memory, returns the maximal possible allocation size, otherwise returns
"size". */
size_t __msan_get_estimated_allocated_size(size_t size);
/* Returns true if p was returned by the Msan allocator and
is not yet freed. */
int __msan_get_ownership(const volatile void *p);
/* Returns the number of bytes reserved for the pointer p.
Requires (get_ownership(p) == true) or (p == 0). */
size_t __msan_get_allocated_size(const volatile void *p);
/* Number of bytes, allocated and not yet freed by the application. */
size_t __msan_get_current_allocated_bytes();
/* Number of bytes, mmaped by msan allocator to fulfill allocation requests.
Generally, for request of X bytes, allocator can reserve and add to free
lists a large number of chunks of size X to use them for future requests.
All these chunks count toward the heap size. Currently, allocator never
releases memory to OS (instead, it just puts freed chunks to free
lists). */
size_t __msan_get_heap_size();
/* Number of bytes, mmaped by msan allocator, which can be used to fulfill
allocation requests. When a user program frees memory chunk, it can first
fall into quarantine and will count toward __msan_get_free_bytes()
later. */
size_t __msan_get_free_bytes();
/* Number of bytes in unmapped pages, that are released to OS. Currently,
always returns 0. */
size_t __msan_get_unmapped_bytes();
/* Malloc hooks that may be optionally provided by user.
__msan_malloc_hook(ptr, size) is called immediately after
allocation of "size" bytes, which returned "ptr".
__msan_free_hook(ptr) is called immediately before
deallocation of "ptr". */
void __msan_malloc_hook(const volatile void *ptr, size_t size);
void __msan_free_hook(const volatile void *ptr);
#else // __has_feature(memory_sanitizer)
#define __msan_get_origin_descr_if_stack(id) ((const char*)0)
#define __msan_set_origin(a, size, origin)
#define __msan_get_origin(a) ((uint32_t)-1)
#define __msan_get_track_origins() (0)
#define __msan_get_umr_origin() ((uint32_t)-1)
#define __msan_unpoison(a, size)
#define __msan_poison(a, size)
#define __msan_partial_poison(data, shadow, size)
#define __msan_test_shadow(x, size) ((intptr_t)-1)
#define __msan_set_exit_code(exit_code)
#define __msan_set_expect_umr(expect_umr)
#define __msan_print_shadow(x, size)
#define __msan_print_param_shadow()
#define __msan_has_dynamic_component() (0)
#define __msan_allocated_memory(data, size)
#endif // __has_feature(memory_sanitizer)
#ifdef __cplusplus
} // extern "C"
#endif
#endif
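
A hedged sketch of the custom-allocator hook described above (arena_alloc is illustrative, not part of the tree): after carving a block out of a private arena, tell MSan the memory is newly allocated so reads of it are reported until it is initialized.

#include <stddef.h>
#include <sanitizer/msan_interface.h>

static char arena[1 << 20];
static size_t arena_used;

void *arena_alloc(size_t size) {
  // No alignment handling; a real allocator would round up.
  void *p = arena + arena_used;
  arena_used += size;
  // Mark as uninitialized, with the origin recorded at this call site.
  __msan_allocated_memory(p, size);
  return p;
}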

View File

@ -21,27 +21,20 @@
// These typedefs should be used only in the interceptor definitions to replace
// the standard system types (e.g. SSIZE_T instead of ssize_t)
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64.
// Mac and Linux/x86-64 are special.
#if defined(__APPLE__) || (defined(__linux__) && defined(__x86_64__))
typedef __sanitizer::u64 OFF_T;
#else
typedef __sanitizer::uptr OFF_T;
#endif
typedef __sanitizer::u64 OFF64_T;
typedef __sanitizer::uptr SIZE_T;
typedef __sanitizer::sptr SSIZE_T;
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::OFF_T OFF_T;
typedef __sanitizer::OFF64_T OFF64_T;
// How to add an interceptor:
// Suppose you need to wrap/replace system function (generally, from libc):
// int foo(const char *bar, double baz);
// You'll need to:
// 1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in
// your source file.
// your source file. See the notes below for cases when
// INTERCEPTOR_WITH_SUFFIX(...) should be used instead.
// 2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo".
// INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was
// intercepted successfully.
@ -55,15 +48,20 @@ typedef __sanitizer::u64 OFF64_T;
// 3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)
// to a header file.
// Notes: 1. Things may not work properly if macro INTERCEPT(...) {...} or
// Notes: 1. Things may not work properly if macro INTERCEPTOR(...) {...} or
// DECLARE_REAL(...) are located inside namespaces.
// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo);" to
// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo)" to
// effectively redirect calls from "foo" to "zoo". In this case
// you aren't required to implement
// INTERCEPTOR(int, foo, const char *bar, double baz) {...}
// but instead you'll have to add
// DEFINE_REAL(int, foo, const char *bar, double baz) in your
// DECLARE_REAL(int, foo, const char *bar, double baz) in your
// source file (to define a pointer to the overridden function).
// 3. Some Mac functions have symbol variants discriminated by
// additional suffixes, e.g. _$UNIX2003 (see
// https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html
// for more details). To intercept such functions you need to use the
// INTERCEPTOR_WITH_SUFFIX(...) macro.
// How it works:
// To replace system functions on Linux we just need to declare functions
@ -73,6 +71,7 @@ typedef __sanitizer::u64 OFF64_T;
// we intercept. To resolve this we declare our interceptors with __interceptor_
// prefix, and then make actual interceptors weak aliases to __interceptor_
// functions.
//
// This is not so on Mac OS, where the two-level namespace makes
// our replacement functions invisible to other libraries. This may be overcome
// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
@ -82,12 +81,43 @@ typedef __sanitizer::u64 OFF64_T;
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
// the calls to interposed functions done through stubs to the wrapper
// functions.
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
#if defined(__APPLE__)
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
// Just a pair of pointers.
struct interpose_substitution {
const uptr replacement;
const uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
#define INTERPOSER(func_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(WRAP(func_name)), \
reinterpret_cast<const uptr>(func_name) } \
}
// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
{ reinterpret_cast<const uptr>(wrapper_name), \
reinterpret_cast<const uptr>(func_name) } \
}
# define WRAP(x) wrap_##x
# define WRAPPER_NAME(x) "wrap_"#x
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif defined(_WIN32)
# if defined(_DLL) // DLL CRT
# define WRAP(x) x
@ -98,7 +128,10 @@ typedef __sanitizer::u64 OFF64_T;
# define WRAPPER_NAME(x) "wrap_"#x
# define INTERCEPTOR_ATTRIBUTE
# endif
# define DECLARE_WRAPPER(ret_type, func, ...)
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
#else
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
@ -142,6 +175,7 @@ typedef __sanitizer::u64 OFF64_T;
# define DEFINE_REAL(ret_type, func, ...)
#endif
#if !defined(__APPLE__)
#define INTERCEPTOR(ret_type, func, ...) \
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
@ -149,13 +183,36 @@ typedef __sanitizer::u64 OFF64_T;
INTERCEPTOR_ATTRIBUTE \
ret_type WRAP(func)(__VA_ARGS__)
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR(ret_type, func, __VA_ARGS__)
#else // __APPLE__
#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) suffix; \
extern "C" ret_type WRAP(func)(__VA_ARGS__); \
INTERPOSER(func); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__)
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__)
// Override |overridee| with |overrider|.
#define OVERRIDE_FUNCTION(overridee, overrider) \
INTERPOSER_2(overridee, WRAP(overrider))
#endif
#if defined(_WIN32)
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
FUNC_TYPE(func) PTR_TO_REAL(func); \
} \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER_WINAPI(ret_type, func, __VA_ARGS__) \
extern "C" \
INTERCEPTOR_ATTRIBUTE \
ret_type __stdcall WRAP(func)(__VA_ARGS__)
@ -181,8 +238,6 @@ typedef unsigned long uptr; // NOLINT
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func)
#elif defined(__APPLE__)
# include "interception_mac.h"
# define OVERRIDE_FUNCTION(old_func, new_func) \
OVERRIDE_FUNCTION_MAC(old_func, new_func)
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
#else // defined(_WIN32)
# include "interception_win.h"
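
A minimal sketch following steps 1)-3) above for a Linux build (the REAL() accessor belongs to the same interception machinery; InitializeMyInterceptors is an illustrative name): wrap libc's strlen and forward to the real implementation once INTERCEPT_FUNCTION has resolved it.

#include "interception/interception.h"

INTERCEPTOR(SIZE_T, strlen, const char *s) {
  // Pre-call checks (e.g. poisoning/validity checks) would go here.
  return REAL(strlen)(s);
}

void InitializeMyInterceptors() {
  // Evaluates to true iff strlen was intercepted successfully.
  INTERCEPT_FUNCTION(strlen);
}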

View File

@ -13,7 +13,6 @@
#ifdef __linux__
#include "interception.h"
#include <stddef.h> // for NULL
#include <dlfcn.h> // for dlsym
namespace __interception {
@ -22,6 +21,13 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
*func_addr = (uptr)dlsym(RTLD_NEXT, func_name);
return real == wrapper;
}
#if !defined(__ANDROID__) // android does not have dlvsym
void *GetFuncAddrVer(const char *func_name, const char *ver) {
return dlvsym(RTLD_NEXT, func_name, ver);
}
#endif // !defined(__ANDROID__)
} // namespace __interception

View File

@ -23,6 +23,7 @@ namespace __interception {
// returns true if a function with the given name was found.
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
uptr real, uptr wrapper);
void *GetFuncAddrVer(const char *func_name, const char *ver);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX(func) \
@ -31,5 +32,11 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
(::__interception::uptr)&(func), \
(::__interception::uptr)&WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym
#define INTERCEPT_FUNCTION_VER(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \
::__interception::GetFuncAddrVer(#func, #symver)
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
#endif // __linux__
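
And a hedged sketch of the versioned variant above (Linux, non-Android; memcpy and GLIBC_2.2.5 are illustrative choices): bind the real-function pointer to a specific symbol version via dlvsym instead of whatever dlsym(RTLD_NEXT) would return.

void InitVersionedInterceptors() {
  // Assumes real_memcpy and the memcpy_f typedef were set up by the usual
  // DEFINE_REAL/INTERCEPTOR machinery for memcpy.
  INTERCEPT_FUNCTION_VER(memcpy, GLIBC_2.2.5);
}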

View File

@ -0,0 +1,60 @@
AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
sanitizer_lsan_files = \
lsan_common.cc \
lsan_common_linux.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
# friends when we are called from the top level Makefile.
AM_MAKEFLAGS = \
"AR_FLAGS=$(AR_FLAGS)" \
"CC_FOR_BUILD=$(CC_FOR_BUILD)" \
"CFLAGS=$(CFLAGS)" \
"CXXFLAGS=$(CXXFLAGS)" \
"CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
"CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
"INSTALL=$(INSTALL)" \
"INSTALL_DATA=$(INSTALL_DATA)" \
"INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
"INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
"JC1FLAGS=$(JC1FLAGS)" \
"LDFLAGS=$(LDFLAGS)" \
"LIBCFLAGS=$(LIBCFLAGS)" \
"LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
"MAKE=$(MAKE)" \
"MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
"PICFLAG=$(PICFLAG)" \
"PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
"SHELL=$(SHELL)" \
"RUNTESTFLAGS=$(RUNTESTFLAGS)" \
"exec_prefix=$(exec_prefix)" \
"infodir=$(infodir)" \
"libdir=$(libdir)" \
"prefix=$(prefix)" \
"includedir=$(includedir)" \
"AR=$(AR)" \
"AS=$(AS)" \
"LD=$(LD)" \
"LIBCFLAGS=$(LIBCFLAGS)" \
"NM=$(NM)" \
"PICFLAG=$(PICFLAG)" \
"RANLIB=$(RANLIB)" \
"DESTDIR=$(DESTDIR)"
MAKEOVERRIDES=
## ################################################################

View File

@ -0,0 +1,514 @@
# Makefile.in generated by automake 1.11.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
# Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
subdir = lsan
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
$(top_srcdir)/../config/depstand.m4 \
$(top_srcdir)/../config/lead-dot.m4 \
$(top_srcdir)/../config/libstdc++-raw-cxx.m4 \
$(top_srcdir)/../config/multi.m4 \
$(top_srcdir)/../config/override.m4 \
$(top_srcdir)/../ltoptions.m4 $(top_srcdir)/../ltsugar.m4 \
$(top_srcdir)/../ltversion.m4 $(top_srcdir)/../lt~obsolete.m4 \
$(top_srcdir)/acinclude.m4 $(top_srcdir)/../libtool.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_lsan_la_LIBADD =
am__objects_1 = lsan_common.lo lsan_common_linux.lo
am_libsanitizer_lsan_la_OBJECTS = $(am__objects_1)
libsanitizer_lsan_la_OBJECTS = $(am_libsanitizer_lsan_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@
depcomp = $(SHELL) $(top_srcdir)/../depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
SOURCES = $(libsanitizer_lsan_la_SOURCES)
ETAGS = etags
CTAGS = ctags
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCAS = @CCAS@
CCASDEPMODE = @CCASDEPMODE@
CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBSTDCXX_RAW_CXX_CXXFLAGS = @LIBSTDCXX_RAW_CXX_CXXFLAGS@
LIBSTDCXX_RAW_CXX_LDFLAGS = @LIBSTDCXX_RAW_CXX_LDFLAGS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
enable_shared = @enable_shared@
enable_static = @enable_static@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
multi_basedir = @multi_basedir@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
target_cpu = @target_cpu@
target_noncanonical = @target_noncanonical@
target_os = @target_os@
target_vendor = @target_vendor@
toolexecdir = @toolexecdir@
toolexeclibdir = @toolexeclibdir@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
# May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-Wno-long-long -fPIC -fno-builtin -fno-exceptions \
-fomit-frame-pointer -funwind-tables -fvisibility=hidden \
-Wno-variadic-macros $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_lsan.la
sanitizer_lsan_files = \
lsan_common.cc \
lsan_common_linux.cc
libsanitizer_lsan_la_SOURCES = $(sanitizer_lsan_files)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
# friends when we are called from the top level Makefile.
AM_MAKEFLAGS = \
"AR_FLAGS=$(AR_FLAGS)" \
"CC_FOR_BUILD=$(CC_FOR_BUILD)" \
"CFLAGS=$(CFLAGS)" \
"CXXFLAGS=$(CXXFLAGS)" \
"CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \
"CFLAGS_FOR_TARGET=$(CFLAGS_FOR_TARGET)" \
"INSTALL=$(INSTALL)" \
"INSTALL_DATA=$(INSTALL_DATA)" \
"INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
"INSTALL_SCRIPT=$(INSTALL_SCRIPT)" \
"JC1FLAGS=$(JC1FLAGS)" \
"LDFLAGS=$(LDFLAGS)" \
"LIBCFLAGS=$(LIBCFLAGS)" \
"LIBCFLAGS_FOR_TARGET=$(LIBCFLAGS_FOR_TARGET)" \
"MAKE=$(MAKE)" \
"MAKEINFO=$(MAKEINFO) $(MAKEINFOFLAGS)" \
"PICFLAG=$(PICFLAG)" \
"PICFLAG_FOR_TARGET=$(PICFLAG_FOR_TARGET)" \
"SHELL=$(SHELL)" \
"RUNTESTFLAGS=$(RUNTESTFLAGS)" \
"exec_prefix=$(exec_prefix)" \
"infodir=$(infodir)" \
"libdir=$(libdir)" \
"prefix=$(prefix)" \
"includedir=$(includedir)" \
"AR=$(AR)" \
"AS=$(AS)" \
"LD=$(LD)" \
"LIBCFLAGS=$(LIBCFLAGS)" \
"NM=$(NM)" \
"PICFLAG=$(PICFLAG)" \
"RANLIB=$(RANLIB)" \
"DESTDIR=$(DESTDIR)"
MAKEOVERRIDES =
all: all-am
.SUFFIXES:
.SUFFIXES: .cc .lo .o .obj
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign lsan/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign lsan/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
clean-noinstLTLIBRARIES:
-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
test "$$dir" != "$$p" || dir=.; \
echo "rm -f \"$${dir}/so_locations\""; \
rm -f "$${dir}/so_locations"; \
done
libsanitizer_lsan.la: $(libsanitizer_lsan_la_OBJECTS) $(libsanitizer_lsan_la_DEPENDENCIES)
$(CXXLINK) $(libsanitizer_lsan_la_OBJECTS) $(libsanitizer_lsan_la_LIBADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $<
.cc.obj:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
.cc.lo:
@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
mkid -fID $$unique
tags: TAGS
TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
set x; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: CTAGS
CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES)
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
mostlyclean-am
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am:
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am:
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am:
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
clean-libtool clean-noinstLTLIBRARIES ctags distclean \
distclean-compile distclean-generic distclean-libtool \
distclean-tags dvi dvi-am html html-am info info-am install \
install-am install-data install-data-am install-dvi \
install-dvi-am install-exec install-exec-am install-html \
install-html-am install-info install-info-am install-man \
install-pdf install-pdf-am install-ps install-ps-am \
install-strip installcheck installcheck-am installdirs \
maintainer-clean maintainer-clean-generic mostlyclean \
mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
pdf pdf-am ps ps-am tags uninstall uninstall-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

View File

@ -0,0 +1,6 @@
# This file is used to maintain libtool version info for liblsan. See
# the libtool manual to understand the meaning of the fields. This is
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
0:0:0

libsanitizer/lsan/lsan.cc
View File

@ -0,0 +1,63 @@
//=-- lsan.cc -------------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Standalone LSan RTL.
//
//===----------------------------------------------------------------------===//
#include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
namespace __lsan {
static void InitializeCommonFlags() {
CommonFlags *cf = common_flags();
cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf->symbolize = true;
cf->strip_path_prefix = "";
cf->fast_unwind_on_malloc = true;
cf->malloc_context_size = 30;
cf->detect_leaks = true;
cf->leak_check_at_exit = true;
ParseCommonFlagsFromString(GetEnv("LSAN_OPTIONS"));
}
void Init() {
static bool inited;
if (inited)
return;
inited = true;
SanitizerToolName = "LeakSanitizer";
InitializeCommonFlags();
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
u32 tid = ThreadCreate(0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
// Start symbolizer process if necessary.
if (common_flags()->symbolize) {
getSymbolizer()
->InitializeExternal(common_flags()->external_symbolizer_path);
}
InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
}
} // namespace __lsan

libsanitizer/lsan/lsan.h
View File

@ -0,0 +1,21 @@
//=-- lsan.h --------------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private header for standalone LSan RTL.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
namespace __lsan {
void Init();
void InitializeInterceptors();
} // namespace __lsan

View File

@ -0,0 +1,191 @@
//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"
namespace __lsan {
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
struct ChunkMetadata {
bool allocated : 8; // Must be first.
ChunkTag tag : 2;
uptr requested_size : 54;
u32 stack_trace_id;
};
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.Init();
}
void AllocatorThreadFinish() {
allocator.SwallowCache(&cache);
}
static ChunkMetadata *Metadata(void *p) {
return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
m->requested_size = size;
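// Publish the chunk by flipping the 'allocated' flag last: it occupies the
// first byte of the metadata, so the leak checker can observe it with a
// single relaxed atomic load, without taking the allocator lock.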
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
static void RegisterDeallocation(void *p) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared) {
if (size == 0)
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return 0;
}
void *p = allocator.Allocate(&cache, size, alignment, cleared);
RegisterAllocation(stack, p, size);
return p;
}
void Deallocate(void *p) {
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(&cache, p);
return 0;
}
p = allocator.Reallocate(&cache, p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*begin = (uptr)&cache;
*end = *begin + sizeof(cache);
}
uptr GetMallocUsableSize(void *p) {
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
}
///// Interface to the common LSan module. /////
void LockAllocator() {
allocator.ForceLock();
}
void UnlockAllocator() {
allocator.ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&allocator;
*end = *begin + sizeof(allocator);
}
uptr PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
if (!chunk) return 0;
// LargeMmapAllocator considers pointers to the meta-region of a chunk to be
// valid, but we don't want that.
if (addr < chunk) return 0;
ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
if (m->allocated && addr < chunk + m->requested_size)
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
return chunk;
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = Metadata(reinterpret_cast<void *>(chunk));
CHECK(metadata_);
}
bool LsanMetadata::allocated() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}
ChunkTag LsanMetadata::tag() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}
void LsanMetadata::set_tag(ChunkTag value) {
reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}
uptr LsanMetadata::requested_size() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}
u32 LsanMetadata::stack_trace_id() const {
return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
allocator.ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
void *chunk = allocator.GetBlockBegin(p);
if (!chunk || p < chunk) return kIgnoreObjectInvalid;
ChunkMetadata *m = Metadata(chunk);
CHECK(m);
if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
if (m->tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->tag = kIgnored;
return kIgnoreObjectSuccess;
} else {
return kIgnoreObjectInvalid;
}
}
} // namespace __lsan
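
A hedged sketch of a ForEachChunk client in the shape the common LSan module uses (CountAllocatedCb and CountAllocatedChunks are illustrative names, not from the tree): count chunks still marked allocated while the allocator is locked.

#include "lsan_common.h"  // LsanMetadata, Lock/UnlockAllocator, ForEachChunk

static void CountAllocatedCb(__sanitizer::uptr chunk, void *arg) {
  __lsan::LsanMetadata m(chunk);
  if (m.allocated())
    ++*reinterpret_cast<__sanitizer::uptr *>(arg);
}

__sanitizer::uptr CountAllocatedChunks() {
  __sanitizer::uptr n = 0;
  __lsan::LockAllocator();
  __lsan::ForEachChunk(CountAllocatedCb, &n);
  __lsan::UnlockAllocator();
  return n;
}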

View File

@ -0,0 +1,37 @@
//=-- lsan_allocator.h ----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Allocator for standalone LSan.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __lsan {
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
bool cleared);
void Deallocate(void *p);
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment);
uptr GetMallocUsableSize(void *p);
template<typename Callable>
void ForEachChunk(const Callable &callback);
void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H

View File

@ -0,0 +1,577 @@
//=-- lsan_common.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//
#include "lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#if CAN_SANITIZE_LEAKS
namespace __lsan {
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
Flags lsan_flags;
static void InitializeFlags() {
Flags *f = flags();
// Default values.
f->report_objects = false;
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
f->suppressions = "";
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
f->use_unaligned = false;
f->verbosity = 0;
f->log_pointers = false;
f->log_threads = false;
const char *options = GetEnv("LSAN_OPTIONS");
if (options) {
ParseFlag(options, &f->use_registers, "use_registers");
ParseFlag(options, &f->use_globals, "use_globals");
ParseFlag(options, &f->use_stacks, "use_stacks");
ParseFlag(options, &f->use_tls, "use_tls");
ParseFlag(options, &f->use_unaligned, "use_unaligned");
ParseFlag(options, &f->report_objects, "report_objects");
ParseFlag(options, &f->resolution, "resolution");
CHECK_GE(f->resolution, 0);
ParseFlag(options, &f->max_leaks, "max_leaks");
CHECK_GE(f->max_leaks, 0);
ParseFlag(options, &f->verbosity, "verbosity");
ParseFlag(options, &f->log_pointers, "log_pointers");
ParseFlag(options, &f->log_threads, "log_threads");
ParseFlag(options, &f->exitcode, "exitcode");
ParseFlag(options, &f->suppressions, "suppressions");
}
}
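// Example (hypothetical values):
//   LSAN_OPTIONS="report_objects=1:max_leaks=5:suppressions=supp.txt"
// turns on per-object reporting, caps the report at five leaks, and loads
// suppressions from supp.txt.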
SuppressionContext *suppression_ctx;
void InitializeSuppressions() {
CHECK(!suppression_ctx);
ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
suppression_ctx = new(placeholder_) SuppressionContext;
char *suppressions_from_file;
uptr buffer_size;
if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
&buffer_size, 1 << 26 /* max_len */))
suppression_ctx->Parse(suppressions_from_file);
if (flags()->suppressions[0] && !buffer_size) {
Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
flags()->suppressions);
Die();
}
if (&__lsan_default_suppressions)
suppression_ctx->Parse(__lsan_default_suppressions());
}
void InitCommonLsan() {
InitializeFlags();
InitializeSuppressions();
InitializePlatformSpecificModules();
}
class Decorator: private __sanitizer::AnsiColorDecorator {
public:
Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
const char *End() { return Default(); }
};
static inline bool CanBeAHeapPointer(uptr p) {
// Since our heap is located in mmap-ed memory, we can assume a sensible lower
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
if (p < kMinAddress) return false;
#ifdef __x86_64__
// Accept only canonical form user-space addresses.
return ((p >> 47) == 0);
#else
return true;
#endif
}
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
const uptr alignment = flags()->pointer_alignment();
if (flags()->log_pointers)
Report("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT
void *p = *reinterpret_cast<void **>(pp);
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
uptr chunk = PointsIntoChunk(p);
if (!chunk) continue;
LsanMetadata m(chunk);
// Reachable beats ignored beats leaked.
if (m.tag() == kReachable) continue;
if (m.tag() == kIgnored && tag != kReachable) continue;
m.set_tag(tag);
if (flags()->log_pointers)
Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end = registers_begin + registers.size();
for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
&cache_begin, &cache_end);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
if (flags()->log_threads)
Report("Thread %d not found in registry.\n", os_id);
continue;
}
uptr sp;
bool have_registers =
(suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
if (!have_registers) {
Report("Unable to get registers from thread %d.\n");
// If unable to get SP, consider the entire stack to be reachable.
sp = stack_begin;
}
if (flags()->use_registers && have_registers)
ScanRangeForPointers(registers_begin, registers_end, frontier,
"REGISTERS", kReachable);
if (flags()->use_stacks) {
if (flags()->log_threads)
Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack). Again, consider the entire stack
// range to be reachable.
if (flags()->log_threads)
Report("WARNING: stack pointer not in stack range.\n");
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
}
ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
kReachable);
}
if (flags()->use_tls) {
if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
if (cache_begin == cache_end) {
ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
} else {
// Because LSan should not be loaded with dlopen(), we can assume
// that the allocator cache will be part of the static TLS image.
CHECK_LE(tls_begin, cache_begin);
CHECK_GE(tls_end, cache_end);
if (tls_begin < cache_begin)
ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
kReachable);
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
}
}
}
}
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
while (frontier->size()) {
uptr next_chunk = frontier->back();
frontier->pop_back();
LsanMetadata m(next_chunk);
ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
"HEAP", tag);
}
}
// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
ScanRangeForPointers(chunk, chunk + m.requested_size(),
/* frontier */ 0, "HEAP", kIndirectlyLeaked);
}
}
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
CHECK(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() == kIgnored)
reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
Frontier frontier(GetPageSizeCached());
if (flags()->use_globals)
ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
FloodFillTag(&frontier, kReachable);
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
if (flags()->log_pointers)
Report("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
if (flags()->log_pointers)
Report("Scanning leaked chunks.\n");
ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id);
uptr size = 0;
const uptr *trace = StackDepotGet(stack_trace_id, &size);
StackTrace::PrintStack(trace, size, common_flags()->symbolize,
common_flags()->strip_path_prefix, 0);
}
// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
if (resolution > 0) {
uptr size = 0;
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
size = Min(size, resolution);
leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
} else {
leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
}
}
}
// ForEachChunkCallback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
Printf("%s leaked %zu byte object at %p.\n",
m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
m.requested_size(), chunk);
}
}
static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
suppression_ctx->GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
Printf("%s\n", line);
Printf("Suppressions used:\n");
Printf(" count bytes template\n");
for (uptr i = 0; i < matched.size(); i++)
Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
matched[i]->weight, matched[i]->templ);
Printf("%s\n\n", line);
}
static void PrintLeaked() {
Printf("\n");
Printf("Reporting individual objects:\n");
ForEachChunk(PrintLeakedCb, 0 /* arg */);
}
struct DoLeakCheckParam {
bool success;
LeakReport leak_report;
};
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
void *arg) {
DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
CHECK(param);
CHECK(!param->success);
CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
if (!param->leak_report.IsEmpty() && flags()->report_objects)
PrintLeaked();
param->success = true;
}
void DoLeakCheck() {
EnsureMainThreadIDIsCorrect();
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
if (&__lsan_is_turned_off && __lsan_is_turned_off())
return;
DoLeakCheckParam param;
param.success = false;
LockThreadRegistry();
LockAllocator();
StopTheWorld(DoLeakCheckCallback, &param);
UnlockAllocator();
UnlockThreadRegistry();
if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
Die();
}
uptr have_unsuppressed = param.leak_report.ApplySuppressions();
if (have_unsuppressed) {
Decorator d;
Printf("\n"
"================================================================="
"\n");
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.End());
param.leak_report.PrintLargest(flags()->max_leaks);
}
if (have_unsuppressed || (flags()->verbosity >= 1)) {
PrintMatchedSuppressions();
param.leak_report.PrintSummary();
}
if (have_unsuppressed && flags()->exitcode)
internal__exit(flags()->exitcode);
}
static Suppression *GetSuppressionForAddr(uptr addr) {
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
uptr addr_frames_num =
getSymbolizer()->SymbolizeCode(addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
Suppression* s;
if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
return s;
}
return 0;
}
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
uptr size = 0;
const uptr *trace = StackDepotGet(stack_trace_id, &size);
for (uptr i = 0; i < size; i++) {
Suppression *s =
GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
if (s) return s;
}
return 0;
}
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;
void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
bool is_directly_leaked = (tag == kDirectlyLeaked);
for (uptr i = 0; i < leaks_.size(); i++)
if (leaks_[i].stack_trace_id == stack_trace_id &&
leaks_[i].is_directly_leaked == is_directly_leaked) {
leaks_[i].hit_count++;
leaks_[i].total_size += leaked_size;
return;
}
if (leaks_.size() == kMaxLeaksConsidered) return;
Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
is_directly_leaked, /* is_suppressed */ false };
leaks_.push_back(leak);
}
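// Editor's note: a sketch of the hash-table aggregation the FIXME above asks
// for -- illustration only (std::unordered_map is fine for exposition, though
// the run-time itself cannot use libc++ containers):
#include <cstdint>
#include <unordered_map>
struct LeakStats { uint64_t hit_count = 0, total_size = 0; };
// The key packs (stack_trace_id, is_directly_leaked) into one 64-bit value,
// so Add() becomes O(1) instead of the linear scan above and the
// kMaxLeaksConsidered cap could go away.
static std::unordered_map<uint64_t, LeakStats> g_leaks;
void AddLeak(uint32_t stack_trace_id, uint64_t leaked_size, bool direct) {
  uint64_t key = (uint64_t(stack_trace_id) << 1) | uint64_t(direct);
  LeakStats &s = g_leaks[key];
  s.hit_count += 1;
  s.total_size += leaked_size;
}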
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
if (leak1.is_directly_leaked == leak2.is_directly_leaked)
return leak1.total_size > leak2.total_size;
else
return leak1.is_directly_leaked;
}
void LeakReport::PrintLargest(uptr num_leaks_to_print) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
Printf("Too many leaks! Only the first %zu leaks encountered will be "
"reported.\n",
kMaxLeaksConsidered);
uptr unsuppressed_count = 0;
for (uptr i = 0; i < leaks_.size(); i++)
if (!leaks_[i].is_suppressed) unsuppressed_count++;
if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
Printf("The %zu largest leak(s):\n", num_leaks_to_print);
InternalSort(&leaks_, leaks_.size(), LeakComparator);
uptr leaks_printed = 0;
Decorator d;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
Printf("%s", d.Leak());
Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
leaks_[i].total_size, leaks_[i].hit_count);
Printf("%s", d.End());
PrintStackTraceById(leaks_[i].stack_trace_id);
Printf("\n");
leaks_printed++;
if (leaks_printed == num_leaks_to_print) break;
}
if (leaks_printed < unsuppressed_count) {
uptr remaining = unsuppressed_count - leaks_printed;
Printf("Omitting %zu more leak(s).\n", remaining);
}
}
void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
if (leaks_[i].is_suppressed) continue;
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
const int kMaxSummaryLength = 128;
InternalScopedBuffer<char> summary(kMaxSummaryLength);
internal_snprintf(summary.data(), kMaxSummaryLength,
"LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).",
bytes, allocations);
__sanitizer_report_error_summary(summary.data());
}
uptr LeakReport::ApplySuppressions() {
uptr unsuppressed_count = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
s->hit_count += leaks_[i].hit_count;
leaks_[i].is_suppressed = true;
} else {
unsuppressed_count++;
}
}
return unsuppressed_count;
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
using namespace __lsan; // NOLINT
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
// Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
// locked.
BlockingMutexLock l(&global_mutex);
IgnoreObjectResult res = IgnoreObjectLocked(p);
if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
Report("__lsan_ignore_object(): no heap object found at %p", p);
if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
Report("__lsan_ignore_object(): "
"heap object at %p is already being ignored\n", p);
if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
__lsan::disable_counter++;
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
if (!__lsan::disable_counter) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
__lsan::disable_counter--;
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
__lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}
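// Editor's note: a hedged end-user sketch, not part of this merge, showing
// how an application drives the entry points above (the public header name
// <sanitizer/lsan_interface.h> is assumed from this era):
//
// #include <stdlib.h>
// #include <sanitizer/lsan_interface.h>
// int main(void) {
//   void *leaked = malloc(64);       // reported as a direct leak
//   void *ignored = malloc(32);
//   __lsan_ignore_object(ignored);   // tagged kIgnored, never reported
//   __lsan_disable();
//   void *hidden = malloc(16);       // allocations while disabled are ignored
//   __lsan_enable();
//   __lsan_do_leak_check();          // explicit check before exit
//   return 0;
// }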
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;
}
#endif
} // extern "C"

View File

@ -0,0 +1,174 @@
//=-- lsan_common.h -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Private LSan header.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && defined(__x86_64__)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
namespace __lsan {
// Chunk tags.
enum ChunkTag {
kDirectlyLeaked = 0, // default
kIndirectlyLeaked = 1,
kReachable = 2,
kIgnored = 3
};
struct Flags {
uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr);
}
// Print addresses of leaked objects after main leak report.
bool report_objects;
// Aggregate two objects into one leak if this many stack frames match. If
// zero, the entire stack trace must match.
int resolution;
// The maximum number of leaks to report (zero means no limit).
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
// Suppressions file name.
const char* suppressions;
// Flags controlling the root set of reachable memory.
// Global variables (.data and .bss).
bool use_globals;
// Thread stacks.
bool use_stacks;
// Thread registers.
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
// Consider unaligned pointers valid.
bool use_unaligned;
// User-visible verbosity.
int verbosity;
// Debug logging.
bool log_pointers;
bool log_threads;
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
struct Leak {
uptr hit_count;
uptr total_size;
u32 stack_trace_id;
bool is_directly_leaked;
bool is_suppressed;
};
// Aggregates leaks by stack trace prefix.
class LeakReport {
public:
LeakReport() : leaks_(1) {}
void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag);
void PrintLargest(uptr max_leaks);
void PrintSummary();
bool IsEmpty() { return leaks_.size() == 0; }
uptr ApplySuppressions();
private:
InternalMmapVector<Leak> leaks_;
};
typedef InternalMmapVector<uptr> Frontier;
// Platform-specific functions.
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
kIgnoreObjectAlreadyIgnored,
kIgnoreObjectInvalid
};
// Functions called from the parent tool.
void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
// The following must be implemented in the parent tool.
void ForEachChunk(ForEachChunkCallback callback, void *arg);
// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
void UnlockAllocator();
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
// exec(), which invalidates the recorded TID. To update it, we must call
// gettid() from the main thread. Our solution is to call this function before
// leak checking and also before every call to pthread_create() (to handle cases
// where leak checking is initiated from a non-main thread).
void EnsureMainThreadIDIsCorrect();
// If p points into a chunk that has been allocated to the user, returns its
// user-visible address. Otherwise, returns 0.
uptr PointsIntoChunk(void *p);
// Returns address of user-visible chunk contained in this allocator chunk.
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:
// Constructor accepts address of user-visible chunk.
explicit LsanMetadata(uptr chunk);
bool allocated() const;
ChunkTag tag() const;
void set_tag(ChunkTag value);
uptr requested_size() const;
u32 stack_trace_id() const;
private:
void *metadata_;
};
} // namespace __lsan
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
} // extern "C"
#endif // LSAN_COMMON_H

View File

@ -0,0 +1,139 @@
//=-- lsan_common_linux.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"
#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __lsan {
static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = 0;
static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName);
}
void InitializePlatformSpecificModules() {
internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
uptr num_matches = GetListOfModules(
reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
if (num_matches == 1) {
linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
return;
}
if (num_matches == 0)
Report("LeakSanitizer: Dynamic linker not found. "
"TLS will not be handled correctly.\n");
else if (num_matches > 1)
Report("LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName);
linker = 0;
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
void *data) {
Frontier *frontier = reinterpret_cast<Frontier *>(data);
for (uptr j = 0; j < info->dlpi_phnum; j++) {
const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
// We're looking for .data and .bss sections, which reside in writeable,
// loadable segments.
if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
(phdr->p_memsz == 0))
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
uptr allocator_begin = 0, allocator_end = 0;
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
CHECK_LT(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
if (allocator_end < end)
ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
kReachable);
} else {
ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
}
}
return 0;
}
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
// suspending threads.
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id);
uptr size = 0;
const uptr *trace = map->Get(stack_id, &size);
// The top frame is our malloc/calloc/etc. The next frame is the caller.
if (size >= 2)
return trace[1];
return 0;
}
struct ProcessPlatformAllocParam {
Frontier *frontier;
StackDepotReverseMap *stack_depot_reverse_map;
};
// ForEachChunk callback. Identifies unreachable chunks which must be treated as
// reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
CHECK(arg);
ProcessPlatformAllocParam *param =
reinterpret_cast<ProcessPlatformAllocParam *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
u32 stack_id = m.stack_trace_id();
uptr caller_pc = 0;
if (stack_id > 0)
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
// If caller_pc is unknown, this chunk may have been allocated in a coroutine.
// Mark it as reachable, as we can't properly report its allocation stack
// anyway.
if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
m.set_tag(kReachable);
param->frontier->push_back(chunk);
}
}
}
// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
StackDepotReverseMap stack_depot_reverse_map;
ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX

View File

@ -0,0 +1,279 @@
//=-- lsan_interceptors.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Interceptors for standalone LSan.
//
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"
using namespace __lsan;
extern "C" {
int pthread_attr_init(void *attr);
int pthread_attr_destroy(void *attr);
int pthread_attr_getdetachstate(void *attr, int *v);
int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
#define GET_STACK_TRACE \
StackTrace stack; \
{ \
uptr stack_top = 0, stack_bottom = 0; \
ThreadContext *t; \
bool fast = common_flags()->fast_unwind_on_malloc; \
if (fast && (t = CurrentThreadContext())) { \
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
GetStackTrace(&stack, __sanitizer::common_flags()->malloc_context_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \
}
///// Malloc/free interceptors. /////
const bool kAlwaysClearMemory = true;
namespace std {
struct nothrow_t;
}
INTERCEPTOR(void*, malloc, uptr size) {
Init();
GET_STACK_TRACE;
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
INTERCEPTOR(void, free, void *p) {
Init();
Deallocate(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
Init();
GET_STACK_TRACE;
size *= nmemb;
return Allocate(stack, size, 1, true);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
Init();
GET_STACK_TRACE;
return Reallocate(stack, q, size, 1);
}
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
Init();
GET_STACK_TRACE;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
Init();
GET_STACK_TRACE;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
}
INTERCEPTOR(void*, valloc, uptr size) {
Init();
GET_STACK_TRACE;
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
Init();
return GetMallocUsableSize(ptr);
}
struct fake_mallinfo {
int x[10];
};
INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
struct fake_mallinfo res;
internal_memset(&res, 0, sizeof(res));
return res;
}
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
INTERCEPTOR(void*, pvalloc, uptr size) {
Init();
GET_STACK_TRACE;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
INTERCEPTOR(void, cfree, void *p) ALIAS("free");
#define OPERATOR_NEW_BODY \
Init(); \
GET_STACK_TRACE; \
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \
Init(); \
Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
// We need this to intercept the __libc_memalign calls that are used to
// allocate dynamic TLS space in ld-linux.so.
INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s) ALIAS("memalign");
///// Thread initialization and finalization. /////
static unsigned g_thread_finalize_key;
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
return;
}
ThreadFinish();
}
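// Editor's note: the destructor re-arming trick above in isolation -- POSIX
// runs TSD destructors in up to PTHREAD_DESTRUCTOR_ITERATIONS rounds, and a
// destructor that resets its key to a non-NULL value runs again in the next
// round; LSan uses this to push ThreadFinish() towards the last round.
// Minimal standalone sketch (hypothetical names; call
// pthread_key_create(&g_key, dtor) once at startup):
#include <pthread.h>
#include <stdint.h>
static pthread_key_t g_key;
static void dtor(void *v) {
  uintptr_t rounds_left = (uintptr_t)v;
  if (rounds_left > 1) {
    // Re-arm: a non-NULL value schedules one more destructor round.
    pthread_setspecific(g_key, (void *)(rounds_left - 1));
    return;
  }
  // rounds_left == 1: (almost) no other destructor will run after this one.
}
static void *worker(void *arg) {
  pthread_setspecific(g_key, (void *)(uintptr_t)4);  // glibc runs 4 rounds
  return arg;
}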
struct ThreadParam {
void *(*callback)(void *arg);
void *param;
atomic_uintptr_t tid;
};
// PTHREAD_DESTRUCTOR_ITERATIONS from glibc.
const uptr kPthreadDestructorIterations = 4;
extern "C" void *__lsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback;
void *param = p->param;
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
if (pthread_setspecific(g_thread_finalize_key,
(void*)kPthreadDestructorIterations)) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
atomic_store(&p->tid, 0, memory_order_release);
SetCurrentThread(tid);
ThreadStart(tid, GetTid());
return callback(param);
}
INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) {
Init();
EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr;
if (attr == 0) {
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSizeLinux(attr, 0);
int detached = 0;
pthread_attr_getdetachstate(attr, &detached);
ThreadParam p;
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
internal_sched_yield();
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
return res;
}
INTERCEPTOR(int, pthread_join, void *th, void **ret) {
Init();
int tid = ThreadTid((uptr)th);
int res = REAL(pthread_join)(th, ret);
if (res == 0)
ThreadJoin(tid);
return res;
}
namespace __lsan {
void InitializeInterceptors() {
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
INTERCEPT_FUNCTION(cfree);
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
INTERCEPT_FUNCTION(memalign);
INTERCEPT_FUNCTION(posix_memalign);
INTERCEPT_FUNCTION(__libc_memalign);
INTERCEPT_FUNCTION(valloc);
INTERCEPT_FUNCTION(pvalloc);
INTERCEPT_FUNCTION(malloc_usable_size);
INTERCEPT_FUNCTION(mallinfo);
INTERCEPT_FUNCTION(mallopt);
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
Die();
}
}
} // namespace __lsan
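// Editor's note: hedged usage sketch. Once these interceptors are linked in,
// standalone LSan is enabled at link time (the -fsanitize=leak flag name is
// assumed from the clang of this era):
//
//   $ clang -g -fsanitize=leak leaky.c && ./a.out
//   ==1234==ERROR: LeakSanitizer: detected memory leaks
//
// (report shape abbreviated; DoLeakCheck() in lsan_common.cc prints the
// full output).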

View File

@ -0,0 +1,154 @@
//=-- lsan_thread.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_thread.h for details.
//
//===----------------------------------------------------------------------===//
#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "lsan_allocator.h"
namespace __lsan {
const u32 kInvalidTid = (u32) -1;
static ThreadRegistry *thread_registry;
static THREADLOCAL u32 current_thread_tid = kInvalidTid;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
return new(mem) ThreadContext(tid);
}
static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
u32 GetCurrentThread() {
return current_thread_tid;
}
void SetCurrentThread(u32 tid) {
current_thread_tid = tid;
}
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
stack_end_(0),
cache_begin_(0),
cache_end_(0),
tls_begin_(0),
tls_end_(0) {}
struct OnStartedArgs {
uptr stack_begin, stack_end,
cache_begin, cache_end,
tls_begin, tls_end;
};
void ThreadContext::OnStarted(void *arg) {
OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
stack_begin_ = args->stack_begin;
stack_end_ = args->stack_end;
tls_begin_ = args->tls_begin;
tls_end_ = args->tls_end;
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
}
void ThreadContext::OnFinished() {
AllocatorThreadFinish();
}
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
return thread_registry->CreateThread(user_id, detached, parent_tid,
/* arg */ 0);
}
void ThreadStart(u32 tid, uptr os_id) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
thread_registry->StartThread(tid, os_id, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
}
ThreadContext *CurrentThreadContext() {
if (!thread_registry) return 0;
if (GetCurrentThread() == kInvalidTid)
return 0;
// No lock needed when getting current thread.
return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
}
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
uptr uid = (uptr)arg;
if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
return true;
}
return false;
}
u32 ThreadTid(uptr uid) {
return thread_registry->FindThread(FindThreadByUid, (void*)uid);
}
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */0);
}
void EnsureMainThreadIDIsCorrect() {
if (GetCurrentThread() == 0)
CurrentThreadContext()->os_id = GetTid();
}
///// Interface to the common LSan module. /////
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end) {
ThreadContext *context = static_cast<ThreadContext *>(
thread_registry->FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
*stack_begin = context->stack_begin();
*stack_end = context->stack_end();
*tls_begin = context->tls_begin();
*tls_end = context->tls_end();
*cache_begin = context->cache_begin();
*cache_end = context->cache_end();
return true;
}
void LockThreadRegistry() {
thread_registry->Lock();
}
void UnlockThreadRegistry() {
thread_registry->Unlock();
}
} // namespace __lsan

View File

@ -0,0 +1,51 @@
//=-- lsan_thread.h -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Thread registry for standalone LSan.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_THREAD_H
#define LSAN_THREAD_H
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __lsan {
class ThreadContext : public ThreadContextBase {
public:
explicit ThreadContext(int tid);
void OnStarted(void *arg);
void OnFinished();
uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
uptr cache_begin() { return cache_begin_; }
uptr cache_end() { return cache_end_; }
private:
uptr stack_begin_, stack_end_,
cache_begin_, cache_end_,
tls_begin_, tls_end_;
};
void InitializeThreadRegistry();
void ThreadStart(u32 tid, uptr os_id);
void ThreadFinish();
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
void ThreadJoin(u32 tid);
u32 ThreadTid(uptr uid);
u32 GetCurrentThread();
void SetCurrentThread(u32 tid);
ThreadContext *CurrentThreadContext();
void EnsureMainThreadIDIsCorrect();
} // namespace __lsan
#endif // LSAN_THREAD_H

View File

@ -66,6 +66,7 @@ CUR_REV=$(get_current_rev)
echo Current upstream revision: $CUR_REV
merge include/sanitizer include/sanitizer
merge lib/asan asan
merge lib/lsan lsan
merge lib/tsan/rtl tsan
merge lib/sanitizer_common sanitizer_common
merge lib/interception interception

View File

@ -11,23 +11,27 @@ ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_common.la
sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_linux.cc \
sanitizer_mac.cc \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_printf.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_itanium.cc \
sanitizer_symbolizer_linux.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_symbolizer_win.cc \
sanitizer_win.cc
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)

View File

@ -56,12 +56,17 @@ CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_flags.lo sanitizer_libc.lo sanitizer_linux.lo \
sanitizer_mac.lo sanitizer_platform_limits_posix.lo \
sanitizer_posix.lo sanitizer_printf.lo sanitizer_stackdepot.lo \
sanitizer_stacktrace.lo sanitizer_symbolizer.lo \
sanitizer_symbolizer_itanium.lo sanitizer_symbolizer_linux.lo \
sanitizer_symbolizer_mac.lo sanitizer_symbolizer_win.lo \
sanitizer_common_libcdep.lo sanitizer_flags.lo \
sanitizer_libc.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
@ -223,23 +228,27 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_common.la
sanitizer_common_files = \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_linux.cc \
sanitizer_mac.cc \
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
sanitizer_flags.cc \
sanitizer_libc.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_printf.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_itanium.cc \
sanitizer_symbolizer_linux.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_symbolizer_win.cc \
sanitizer_win.cc
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@ -336,20 +345,24 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_itanium.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:

View File

@ -7,44 +7,103 @@
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator that is used inside run-times.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
// FIXME: We should probably use a more low-level allocator that would
// mmap some pages and split them into chunks to fulfill requests.
#if defined(__linux__) && !defined(__ANDROID__)
extern "C" void *__libc_malloc(__sanitizer::uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
#else // __linux__ && !ANDROID
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
#endif // __linux__ && !ANDROID
#include "sanitizer_flags.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
extern "C" void __libc_free(void *ptr);
# define LIBC_MALLOC __libc_malloc
# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
# define LIBC_MALLOC malloc
# define LIBC_FREE free
# endif
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
(void)cache;
return LIBC_MALLOC(size);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
LIBC_FREE(ptr);
}
InternalAllocator *internal_allocator() {
return 0;
}
#else // SANITIZER_GO
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;
InternalAllocator *internal_allocator() {
InternalAllocator *internal_allocator_instance =
reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
internal_allocator_instance->Init();
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
return internal_allocator_instance;
}
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
false);
}
return internal_allocator()->Allocate(cache, size, 8, false);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
}
internal_allocator()->Deallocate(cache, ptr);
}
#endif // SANITIZER_GO
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
void *InternalAlloc(uptr size) {
void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
if (size + sizeof(u64) < size)
return 0;
void *p = LIBC_MALLOC(size + sizeof(u64));
void *p = RawInternalAlloc(size + sizeof(u64), cache);
if (p == 0)
return 0;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
void InternalFree(void *addr) {
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (addr == 0)
return;
addr = (char*)addr - sizeof(u64);
CHECK_EQ(((u64*)addr)[0], kBlockMagic);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
((u64*)addr)[0] = 0;
LIBC_FREE(addr);
RawInternalFree(addr, cache);
}
// LowLevelAllocator
@ -79,4 +138,14 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
void *AllocatorReturnNull() {
if (common_flags()->allocator_may_return_null)
return 0;
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
return 0;
}
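// Editor's note: hedged usage sketch. The flag consulted above is the shared
// sanitizer flag allocator_may_return_null, e.g. under ASan:
//
//   ASAN_OPTIONS=allocator_may_return_null=1 ./a.out  # oversized malloc -> 0
//   ASAN_OPTIONS=allocator_may_return_null=0 ./a.out  # prints the message
//                                                     # above and dies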
} // namespace __sanitizer

View File

@ -21,18 +21,21 @@
namespace __sanitizer {
// Depending on allocator_may_return_null either return 0 or crash.
void *AllocatorReturnNull();
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 8 classes: 256 + i * 32 (i = 1 to 8).
// Next 8 classes: 512 + i * 64 (i = 1 to 8).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8).
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
// - Efficient table-free class-to-size and size-to-class functions.
// - Difference between two consecutive size classes is between 12% and 6%
// - Difference between two consecutive size classes is between 14% and 25%
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that need to be cached per-thread:
@ -59,46 +62,51 @@ namespace __sanitizer {
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 288 diff: +32 12% l 8 cached: 227 65376; id 17
// c18 => s: 320 diff: +32 11% l 8 cached: 204 65280; id 18
// c19 => s: 352 diff: +32 10% l 8 cached: 186 65472; id 19
// c20 => s: 384 diff: +32 09% l 8 cached: 170 65280; id 20
// c21 => s: 416 diff: +32 08% l 8 cached: 157 65312; id 21
// c22 => s: 448 diff: +32 07% l 8 cached: 146 65408; id 22
// c23 => s: 480 diff: +32 07% l 8 cached: 136 65280; id 23
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c24 => s: 512 diff: +32 06% l 9 cached: 128 65536; id 24
// c25 => s: 576 diff: +64 12% l 9 cached: 113 65088; id 25
// c26 => s: 640 diff: +64 11% l 9 cached: 102 65280; id 26
// c27 => s: 704 diff: +64 10% l 9 cached: 93 65472; id 27
// c28 => s: 768 diff: +64 09% l 9 cached: 85 65280; id 28
// c29 => s: 832 diff: +64 08% l 9 cached: 78 64896; id 29
// c30 => s: 896 diff: +64 07% l 9 cached: 73 65408; id 30
// c31 => s: 960 diff: +64 07% l 9 cached: 68 65280; id 31
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c32 => s: 1024 diff: +64 06% l 10 cached: 64 65536; id 32
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
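// Editor's note: a self-contained sketch of the mapping tabulated above,
// assuming kMinSize = 16, kMidSize = 256 and S = 2 (each power-of-two
// interval [2^l, 2^(l+1)) is split into 2^S = 4 steps of 2^(l-2)).
// Illustration only; SizeClassMap::ClassID() is authoritative.
static inline unsigned long SketchClassID(unsigned long size) {
  if (size <= 256) return (size + 15) >> 4;   // classes 1..16: 16-byte steps
  unsigned l = 63 - __builtin_clzl(size);     // size lies in [2^l, 2^(l+1))
  unsigned long step = 1UL << (l - 2);        // step = 2^(l-S)
  return 16 + ((l - 8) << 2)                  // classes used by lower intervals
         + (size - (1UL << l) + step - 1) / step;  // position within interval
}
// SketchClassID(320) == 17 and SketchClassID(512) == 20, matching the
// "c17 => s: 320" and "c20 => s: 512" rows above.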
template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog,
uptr kMinBatchClassT>
template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
class SizeClassMap {
static const uptr kMinSizeLog = 4;
static const uptr kMidSizeLog = kMinSizeLog + 4;
static const uptr kMinSize = 1 << kMinSizeLog;
static const uptr kMidSize = 1 << kMidSizeLog;
static const uptr kMidClass = kMidSize / kMinSize;
static const uptr S = 3;
static const uptr S = 2;
static const uptr M = (1 << S) - 1;
public:
static const uptr kMaxNumCached = kMaxNumCachedT;
// We transfer chunks between central and thread-local free lists in batches.
// For small size classes we allocate batches separately.
// For large size classes we use one of the chunks to store the batch.
struct TransferBatch {
TransferBatch *next;
uptr count;
void *batch[kMaxNumCached];
};
static const uptr kMinBatchClass = kMinBatchClassT;
static const uptr kMaxSize = 1 << kMaxSizeLog;
static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
@ -141,7 +149,7 @@ class SizeClassMap {
Printf("\n");
uptr d = s - prev_s;
uptr p = prev_s ? (d * 100 / prev_s) : 0;
uptr l = MostSignificantSetBitIndex(s);
uptr l = s ? MostSignificantSetBitIndex(s) : 0;
uptr cached = MaxCached(i) * s;
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
"cached: %zd %zd; id %zd\n",
@ -152,10 +160,16 @@ class SizeClassMap {
Printf("Total cached: %zd\n", total_cached);
}
static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
return Size(class_id) < sizeof(TransferBatch) -
sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
}
static void Validate() {
for (uptr c = 1; c < kNumClasses; c++) {
// Printf("Validate: c%zd\n", c);
uptr s = Size(c);
CHECK_NE(s, 0U);
CHECK_EQ(ClassID(s), c);
if (c != kNumClasses - 1)
CHECK_EQ(ClassID(s + 1), c + 1);
@ -173,24 +187,11 @@ class SizeClassMap {
if (c > 0)
CHECK_LT(Size(c-1), s);
}
// TransferBatch for kMinBatchClass must fit into the block itself.
const uptr batch_size = sizeof(TransferBatch)
- sizeof(void*) // NOLINT
* (kMaxNumCached - MaxCached(kMinBatchClass));
CHECK_LE(batch_size, Size(kMinBatchClass));
// TransferBatch for kMinBatchClass-1 must not fit into the block itself.
const uptr batch_size1 = sizeof(TransferBatch)
- sizeof(void*) // NOLINT
* (kMaxNumCached - MaxCached(kMinBatchClass - 1));
CHECK_GT(batch_size1, Size(kMinBatchClass - 1));
}
};
typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(25, 28)>
DefaultSizeClassMap;
typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(17, 20)>
CompactSizeClassMap;
typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
// Memory allocator statistics
@ -279,6 +280,9 @@ struct NoOpMapUnmapCallback {
void OnUnmap(uptr p, uptr size) const { }
};
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
@ -339,25 +343,28 @@ class SizeClassAllocator64 {
NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
RegionInfo *region = GetRegionInfo(class_id);
CHECK_GT(b->count, 0);
region->free_list.Push(b);
region->n_freed += b->count;
}
static bool PointerIsMine(void *p) {
static bool PointerIsMine(const void *p) {
return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
}
static uptr GetSizeClass(void *p) {
static uptr GetSizeClass(const void *p) {
return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
}
void *GetBlockBegin(void *p) {
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
if (!size) return 0;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
if (class_id >= kNumClasses) return 0;
RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
@ -371,7 +378,7 @@ class SizeClassAllocator64 {
uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
@ -430,6 +437,22 @@ class SizeClassAllocator64 {
}
}
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
RegionInfo *region = GetRegionInfo(class_id);
uptr chunk_size = SizeClassMap::Size(class_id);
uptr region_beg = kSpaceBeg + class_id * kRegionSize;
for (uptr chunk = region_beg;
chunk < region_beg + region->allocated_user;
chunk += chunk_size) {
// Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
callback(chunk, arg);
}
}
}
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@ -471,11 +494,12 @@ class SizeClassAllocator64 {
}
static uptr GetChunkIdx(uptr chunk, uptr size) {
u32 offset = chunk % kRegionSize;
uptr offset = chunk % kRegionSize;
// Here we divide by a non-constant. This is costly.
// We require that kRegionSize is at least 2^32 so that offset is 32-bit.
// We save 2x by using 32-bit div, but may need to use a 256-way switch.
return offset / (u32)size;
// size always fits into 32-bits. If the offset fits too, use 32-bit div.
if (offset >> (SANITIZER_WORDSIZE / 2))
return offset / size;
return (u32)offset / (u32)size;
}
NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
@ -513,14 +537,14 @@ class SizeClassAllocator64 {
region->mapped_meta += map_size;
}
CHECK_LE(region->allocated_meta, region->mapped_meta);
if (region->allocated_user + region->allocated_meta > kRegionSize) {
Printf("Out of memory. Dying.\n");
if (region->mapped_user + region->mapped_meta > kRegionSize) {
Printf("%s: Out of memory. Dying. ", SanitizerToolName);
Printf("The process has exhausted %zuMB for size class %zu.\n",
kRegionSize / 1024 / 1024, size);
Die();
}
for (;;) {
if (class_id < SizeClassMap::kMinBatchClass)
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)(region_beg + beg_idx);
@ -532,12 +556,37 @@ class SizeClassAllocator64 {
beg_idx += count * size;
if (beg_idx + count * size + size > region->mapped_user)
break;
CHECK_GT(b->count, 0);
region->free_list.Push(b);
}
return b;
}
};
// Maps integers in range [0, kSize) to u8 values.
template<u64 kSize>
class FlatByteMap {
public:
void TestOnlyInit() {
internal_memset(map_, 0, sizeof(map_));
}
void set(uptr idx, u8 val) {
CHECK_LT(idx, kSize);
CHECK_EQ(0U, map_[idx]);
map_[idx] = val;
}
u8 operator[] (uptr idx) {
CHECK_LT(idx, kSize);
// FIXME: CHECK may be too expensive here.
return map_[idx];
}
private:
u8 map_[kSize];
};
// FIXME: Also implement TwoLevelByteMap.
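A minimal usage sketch (size and values assumed): the map is zero-filled up front and each index may be set only once, matching how regions are bound to size classes.
FlatByteMap<1 << 12> region_classes;  // maps indices [0, 4096) to u8
region_classes.TestOnlyInit();        // zero-fill; 0 means "unused"
region_classes.set(42, 3);            // e.g. region 42 holds size class 3
u8 cls = region_classes[42];          // cls == 3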
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
@ -549,7 +598,7 @@ class SizeClassAllocator64 {
// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// an u8 array possible_regions[kNumPossibleRegions] to store the size classes.
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
@ -560,16 +609,19 @@ class SizeClassAllocator64 {
// cache-line aligned.
template <const uptr kSpaceBeg, const u64 kSpaceSize,
const uptr kMetadataSize, class SizeClassMap,
const uptr kRegionSizeLog,
class ByteMap,
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
public:
typedef typename SizeClassMap::TransferBatch Batch;
typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
SizeClassMap, MapUnmapCallback> ThisT;
SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
void Init() {
state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
void *MapWithCallback(uptr size) {
@ -589,7 +641,7 @@ class SizeClassAllocator32 {
alignment <= SizeClassMap::kMaxSize;
}
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
@ -617,18 +669,19 @@ class SizeClassAllocator32 {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
CHECK_GT(b->count, 0);
sci->free_list.push_front(b);
}
bool PointerIsMine(void *p) {
bool PointerIsMine(const void *p) {
return GetSizeClass(p) != 0;
}
uptr GetSizeClass(void *p) {
return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
uptr GetSizeClass(const void *p) {
return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
}
void *GetBlockBegin(void *p) {
void *GetBlockBegin(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
@ -650,16 +703,15 @@ class SizeClassAllocator32 {
// No need to lock here.
uptr res = 0;
for (uptr i = 0; i < kNumPossibleRegions; i++)
if (state_->possible_regions[i])
if (possible_regions[i])
res += kRegionSize;
return res;
}
void TestOnlyUnmap() {
for (uptr i = 0; i < kNumPossibleRegions; i++)
if (state_->possible_regions[i])
if (possible_regions[i])
UnmapWithCallback((i * kRegionSize), kRegionSize);
UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
}
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
@ -676,6 +728,23 @@ class SizeClassAllocator32 {
}
}
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr region = 0; region < kNumPossibleRegions; region++)
if (possible_regions[region]) {
uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
for (uptr chunk = region_beg;
chunk < region_beg + max_chunks_in_region * chunk_size;
chunk += chunk_size) {
// Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
callback(chunk, arg);
}
}
}
void PrintStats() {
}
@ -683,7 +752,6 @@ class SizeClassAllocator32 {
static const uptr kNumClasses = SizeClassMap::kNumClasses;
private:
static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
static const uptr kRegionSize = 1 << kRegionSizeLog;
static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
@ -711,14 +779,13 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMmapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
state_->possible_regions[ComputeRegionId(res)] = class_id;
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
return &state_->size_class_info_array[class_id];
return &size_class_info_array[class_id];
}
void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
@ -730,7 +797,7 @@ class SizeClassAllocator32 {
Batch *b = 0;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (b == 0) {
if (class_id < SizeClassMap::kMinBatchClass)
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)i;
@ -738,19 +805,19 @@ class SizeClassAllocator32 {
}
b->batch[b->count++] = (void*)i;
if (b->count == max_count) {
CHECK_GT(b->count, 0);
sci->free_list.push_back(b);
b = 0;
}
}
if (b)
if (b) {
CHECK_GT(b->count, 0);
sci->free_list.push_back(b);
}
}
struct State {
u8 possible_regions[kNumPossibleRegions];
SizeClassInfo size_class_info_array[kNumClasses];
};
State *state_;
ByteMap possible_regions;
SizeClassInfo size_class_info_array[kNumClasses];
};
// Objects of this type should be used as local caches for SizeClassAllocator64
@ -788,8 +855,12 @@ struct SizeClassAllocatorLocalCache {
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to a CHECK failure.
InitCache();
stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(allocator, class_id);
c->batch[c->count++] = p;
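The scenario that comment guards against, as a sketch (thread and cache names assumed): a pointer allocated on one thread is freed on another thread that has never allocated anything.
// Thread A:
void *p = cache_a.Allocate(&allocator, class_id);
// Thread B, whose very first allocator call is this free; without the
// InitCache() call, per_class_[class_id].max_count would still be 0 and
// the CHECK_NE above would fire:
cache_b.Deallocate(&allocator, class_id, p);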
@ -815,7 +886,7 @@ struct SizeClassAllocatorLocalCache {
AllocatorStats stats_;
void InitCache() {
if (per_class_[0].max_count)
if (per_class_[1].max_count)
return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
@ -831,7 +902,7 @@ struct SizeClassAllocatorLocalCache {
for (uptr i = 0; i < b->count; i++)
c->batch[i] = b->batch[i];
c->count = b->count;
if (class_id < SizeClassMap::kMinBatchClass)
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
}
@ -839,7 +910,7 @@ struct SizeClassAllocatorLocalCache {
InitCache();
PerClass *c = &per_class_[class_id];
Batch *b;
if (class_id < SizeClassMap::kMinBatchClass)
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
else
b = (Batch*)c->batch[0];
@ -850,6 +921,7 @@ struct SizeClassAllocatorLocalCache {
}
b->count = cnt;
c->count -= cnt;
CHECK_GT(b->count, 0);
allocator->DeallocateBatch(&stats_, class_id, b);
}
};
@ -870,7 +942,7 @@ class LargeMmapAllocator {
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
if (map_size < size) return 0; // Overflow.
if (map_size < size) return AllocatorReturnNull(); // Overflow.
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
MapUnmapCallback().OnMap(map_beg, map_size);
@ -889,6 +961,7 @@ class LargeMmapAllocator {
{
SpinMutexLock l(&mutex_);
uptr idx = n_chunks_++;
chunks_sorted_ = false;
CHECK_LT(idx, kMaxNumChunks);
h->chunk_idx = idx;
chunks_[idx] = h;
@ -912,6 +985,7 @@ class LargeMmapAllocator {
chunks_[idx] = chunks_[n_chunks_ - 1];
chunks_[idx]->chunk_idx = idx;
n_chunks_--;
chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
stat->Add(AllocatorStatFreed, h->map_size);
@ -932,7 +1006,7 @@ class LargeMmapAllocator {
return res;
}
bool PointerIsMine(void *p) {
bool PointerIsMine(const void *p) {
return GetBlockBegin(p) != 0;
}
@ -941,13 +1015,16 @@ class LargeMmapAllocator {
}
// At least page_size_/2 metadata bytes is available.
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
// Too slow: CHECK_EQ(p, GetBlockBegin(p));
CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
Printf("%s: bad pointer %p\n", SanitizerToolName, p);
CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
}
return GetHeader(p) + 1;
}
void *GetBlockBegin(void *ptr) {
void *GetBlockBegin(const void *ptr) {
uptr p = reinterpret_cast<uptr>(ptr);
SpinMutexLock l(&mutex_);
uptr nearest_chunk = 0;
@ -964,7 +1041,49 @@ class LargeMmapAllocator {
CHECK_GE(nearest_chunk, h->map_beg);
CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
CHECK_LE(nearest_chunk, p);
if (h->map_beg + h->map_size < p)
if (h->map_beg + h->map_size <= p)
return 0;
return GetUser(h);
}
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *ptr) {
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
if (!n) return 0;
if (!chunks_sorted_) {
// Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
SortArray(reinterpret_cast<uptr*>(chunks_), n);
for (uptr i = 0; i < n; i++)
chunks_[i]->chunk_idx = i;
chunks_sorted_ = true;
min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
chunks_[n - 1]->map_size;
}
if (p < min_mmap_ || p >= max_mmap_)
return 0;
uptr beg = 0, end = n - 1;
// This loop is a log(n) lower_bound. It does not check for the exact match
// to avoid expensive cache-thrashing loads.
while (end - beg >= 2) {
uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
if (p < reinterpret_cast<uptr>(chunks_[mid]))
end = mid - 1; // We are not interested in chunks_[mid].
else
beg = mid; // chunks_[mid] may still be what we want.
}
if (beg < end) {
CHECK_EQ(beg + 1, end);
// There are 2 chunks left, choose one.
if (p >= reinterpret_cast<uptr>(chunks_[end]))
beg = end;
}
Header *h = chunks_[beg];
if (h->map_beg + h->map_size <= p || p < h->map_beg)
return 0;
return GetUser(h);
}
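A usage sketch (caller names assumed, using the usual ForceLock()/ForceUnlock() pair): this path is intended for tools such as a leak checker that probe many arbitrary words while holding the allocator lock.
allocator.ForceLock();
void *beg = allocator.GetBlockBeginFastLocked(candidate_ptr);
if (beg) {
  // candidate_ptr points into the user chunk that starts at 'beg'.
}
allocator.ForceUnlock();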
@ -992,6 +1111,13 @@ class LargeMmapAllocator {
mutex_.Unlock();
}
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr i = 0; i < n_chunks_; i++)
callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
}
private:
static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
struct Header {
@ -1002,13 +1128,15 @@ class LargeMmapAllocator {
};
Header *GetHeader(uptr p) {
CHECK_EQ(p % page_size_, 0);
CHECK(IsAligned(p, page_size_));
return reinterpret_cast<Header*>(p - page_size_);
}
Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
Header *GetHeader(const void *p) {
return GetHeader(reinterpret_cast<uptr>(p));
}
void *GetUser(Header *h) {
CHECK_EQ((uptr)h % page_size_, 0);
CHECK(IsAligned((uptr)h, page_size_));
return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
}
@ -1019,6 +1147,8 @@ class LargeMmapAllocator {
uptr page_size_;
Header *chunks_[kMaxNumChunks];
uptr n_chunks_;
uptr min_mmap_, max_mmap_;
bool chunks_sorted_;
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
@ -1047,7 +1177,7 @@ class CombinedAllocator {
if (size == 0)
size = 1;
if (size + alignment < size)
return 0;
return AllocatorReturnNull();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
@ -1098,18 +1228,26 @@ class CombinedAllocator {
return primary_.PointerIsMine(p);
}
void *GetMetaData(void *p) {
void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetMetaData(p);
return secondary_.GetMetaData(p);
}
void *GetBlockBegin(void *p) {
void *GetBlockBegin(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBegin(p);
}
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBeginFastLocked(p);
}
uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
@ -1155,6 +1293,13 @@ class CombinedAllocator {
primary_.ForceUnlock();
}
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
primary_.ForEachChunk(callback, arg);
secondary_.ForEachChunk(callback, arg);
}
private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;


@ -0,0 +1,62 @@
//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This allocator is used inside run-times.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_INTERNAL_H
#define SANITIZER_ALLOCATOR_INTERNAL_H
#include "sanitizer_allocator.h"
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
// FIXME: Check if we may use even more compact size class map for internal
// purposes.
typedef CompactSizeClassMap InternalSizeClassMap;
static const uptr kInternalAllocatorSpace = 0;
#if SANITIZER_WORDSIZE == 32
static const u64 kInternalAllocatorSize = (1ULL << 32);
static const uptr kInternalAllocatorRegionSizeLog = 20;
#else
static const u64 kInternalAllocatorSize = (1ULL << 47);
static const uptr kInternalAllocatorRegionSizeLog = 24;
#endif
static const uptr kInternalAllocatorFlatByteMapSize =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef SizeClassAllocator32<
kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap,
kInternalAllocatorRegionSizeLog,
FlatByteMap<kInternalAllocatorFlatByteMapSize> > PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
// We don't want our internal allocator to do any map/unmap operations.
struct CrashOnMapUnmap {
void OnMap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!");
}
void OnUnmap(uptr p, uptr size) const {
RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!");
}
};
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
LargeMmapAllocator<CrashOnMapUnmap> >
InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0);
void InternalFree(void *p, InternalAllocatorCache *cache = 0);
InternalAllocator *internal_allocator();
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_INTERNAL_H


@ -39,7 +39,17 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
// FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
// FIXME:
// 64-bit atomic operations are not atomic on 32-bit platforms.
// The implementation lacks necessary memory fences on ARM/PPC.
// We would like to use compiler builtin atomic operations,
// but they are mostly broken:
// - they lead to vastly inefficient code generation
// (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
// (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM
// error: undefined reference to '__atomic_load_4'
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
@ -55,7 +65,6 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
// FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
@ -111,12 +120,14 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a,
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
} // namespace __sanitizer
#undef ATOMIC_ORDER
#endif // SANITIZER_ATOMIC_CLANG_H


@ -132,6 +132,27 @@ INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
return v;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp,
u8 xchgv,
memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
u8 cmpv = *cmp;
u8 prev;
__asm {
mov al, cmpv
mov ecx, a
mov dl, xchgv
lock cmpxchg [ecx], dl
mov prev, al
}
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
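A usage sketch for the new 8-bit compare-and-swap (assuming the atomic_uint8_t type and memory_order enum from sanitizer_atomic.h):
atomic_uint8_t flag = {0};  // value starts at 0
u8 expected = 0;
if (atomic_compare_exchange_strong(&flag, &expected, 1,
                                   memory_order_acq_rel)) {
  // We won the race; exactly one caller observes success.
} else {
  // 'expected' now holds the value that was actually in 'flag'.
}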
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
@ -147,9 +168,9 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}


@ -15,6 +15,7 @@
namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";
uptr SanitizerVerbosity = 0;
uptr GetPageSizeCached() {
static uptr PageSize;
@ -23,22 +24,29 @@ uptr GetPageSizeCached() {
return PageSize;
}
static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
// isn't equal to the current PID, try to obtain a file descriptor by opening
// the file "report_path_prefix.<PID>".
static fd_t report_fd = kStderrFd;
static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
fd_t report_fd = kStderrFd;
// Set via __sanitizer_set_report_path.
bool log_to_file = false;
char report_path_prefix[sizeof(report_path_prefix)];
// PID of the process that opened |report_fd|. If a fork() occurs, the PID of
// the child process will be different from |report_fd_pid|.
static int report_fd_pid = 0;
uptr report_fd_pid = 0;
static void (*DieCallback)(void);
void SetDieCallback(void (*callback)(void)) {
static DieCallbackType DieCallback;
void SetDieCallback(DieCallbackType callback) {
DieCallback = callback;
}
DieCallbackType GetDieCallback() {
return DieCallback;
}
void NORETURN Die() {
if (DieCallback) {
DieCallback();
@ -61,41 +69,6 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
Die();
}
static void MaybeOpenReportFile() {
if (!log_to_file || (report_fd_pid == GetPid())) return;
InternalScopedBuffer<char> report_path_full(4096);
internal_snprintf(report_path_full.data(), report_path_full.size(),
"%s.%d", report_path_prefix, GetPid());
fd_t fd = OpenFile(report_path_full.data(), true);
if (fd == kInvalidFd) {
report_fd = kStderrFd;
log_to_file = false;
Report("ERROR: Can't open file: %s\n", report_path_full.data());
Die();
}
if (report_fd != kInvalidFd) {
// We're in the child. Close the parent's log.
internal_close(report_fd);
}
report_fd = fd;
report_fd_pid = GetPid();
}
bool PrintsToTty() {
MaybeOpenReportFile();
return internal_isatty(report_fd);
}
void RawWrite(const char *buffer) {
static const char *kRawWriteError = "RawWrite can't output requested buffer!";
uptr length = (uptr)internal_strlen(buffer);
MaybeOpenReportFile();
if (length != internal_write(report_fd, buffer, length)) {
internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
Die();
}
}
uptr ReadFileToBuffer(const char *file_name, char **buff,
uptr *buff_size, uptr max_len) {
uptr PageSize = GetPageSizeCached();
@ -105,8 +78,9 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
*buff_size = 0;
// The files we usually open are not seekable, so try different buffer sizes.
for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
fd_t fd = OpenFile(file_name, /*write*/ false);
if (fd == kInvalidFd) return 0;
uptr openrv = OpenFile(file_name, /*write*/ false);
if (internal_iserror(openrv)) return 0;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
*buff = (char*)MmapOrDie(size, __FUNCTION__);
*buff_size = size;
@ -128,45 +102,15 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
return read_len;
}
// We don't want to use std::sort to avoid including <algorithm>, as
// we may end up with two implementations of std::sort - one in instrumented
// code, and the other in runtime.
// qsort() from stdlib won't work as it calls malloc(), which results
// in deadlock in ASan allocator.
// We re-implement in-place sorting w/o recursion as straightforward heapsort.
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
template<class T>
static inline bool CompareLess(const T &a, const T &b) {
return a < b;
}
void SortArray(uptr *array, uptr size) {
if (size < 2)
return;
// Stage 1: insert elements to the heap.
for (uptr i = 1; i < size; i++) {
uptr j, p;
for (j = i; j > 0; j = p) {
p = (j - 1) / 2;
if (array[j] > array[p])
Swap(array[j], array[p]);
else
break;
}
}
// Stage 2: swap largest element with the last one,
// and sink the new top.
for (uptr i = size - 1; i > 0; i--) {
Swap(array[0], array[i]);
uptr j, max_ind;
for (j = 0; j < i; j = max_ind) {
uptr left = 2 * j + 1;
uptr right = 2 * j + 2;
max_ind = j;
if (left < i && array[left] > array[max_ind])
max_ind = left;
if (right < i && array[right] > array[max_ind])
max_ind = right;
if (max_ind != j)
Swap(array[j], array[max_ind]);
else
break;
}
}
InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
// We want to map a chunk of address space aligned to 'alignment'.
@ -200,6 +144,27 @@ void ReportErrorSummary(const char *error_type, const char *file,
__sanitizer_report_error_summary(buff.data());
}
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
full_name_ = internal_strdup(module_name);
base_address_ = base_address;
n_ranges_ = 0;
}
void LoadedModule::addAddressRange(uptr beg, uptr end) {
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
ranges_[n_ranges_].beg = beg;
ranges_[n_ranges_].end = end;
n_ranges_++;
}
bool LoadedModule::containsAddress(uptr address) const {
for (uptr i = 0; i < n_ranges_; i++) {
if (ranges_[i].beg <= address && address < ranges_[i].end)
return true;
}
return false;
}
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT


@ -15,6 +15,7 @@
#define SANITIZER_COMMON_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
@ -30,17 +31,22 @@ const uptr kCacheLineSize = 128;
const uptr kCacheLineSize = 64;
#endif
const uptr kMaxPathLength = 512;
extern const char *SanitizerToolName; // Can be changed by the tool.
extern uptr SanitizerVerbosity;
uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
int GetPid();
uptr GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size);
// Memory management
void *MmapOrDie(uptr size, const char *mem_type);
@ -54,10 +60,6 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);
// Internal allocator
void *InternalAlloc(uptr size);
void InternalFree(void *p);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
@ -103,13 +105,20 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
void RawWrite(const char *buffer);
bool PrintsToTty();
// Caching version of PrintsToTty(). Not thread-safe.
bool PrintsToTtyCached();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
void MaybeOpenReportFile();
extern fd_t report_fd;
extern bool log_to_file;
extern char report_path_prefix[4096];
extern uptr report_fd_pid;
fd_t OpenFile(const char *filename, bool write);
uptr OpenFile(const char *filename, bool write);
// Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size',
@ -126,23 +135,29 @@ void DisableCoreDumper();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
void PrepareForSandboxing();
void InitTlsSize();
uptr GetTlsSize();
// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
// Set the name of the current thread to 'name', return true on success.
@ -154,7 +169,9 @@ bool SanitizerGetThreadName(char *name, int max_len);
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
void SetDieCallback(void (*callback)(void));
typedef void (*DieCallbackType)(void);
void SetDieCallback(DieCallbackType);
DieCallbackType GetDieCallback();
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
@ -166,7 +183,7 @@ void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function);
// Math
#if defined(_WIN32) && !defined(__clang__)
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
@ -178,9 +195,9 @@ unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); /
#endif
INLINE uptr MostSignificantSetBitIndex(uptr x) {
CHECK(x != 0);
CHECK_NE(x, 0U);
unsigned long up; // NOLINT
#if !defined(_WIN32) || defined(__clang__)
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
#elif defined(_WIN64)
_BitScanReverse64(&up, x);
@ -219,7 +236,7 @@ INLINE bool IsAligned(uptr a, uptr alignment) {
INLINE uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
return __builtin_ctzl(x);
#elif defined(_WIN64)
unsigned long ret; // NOLINT
@ -260,6 +277,160 @@ INLINE int ToLower(int c) {
# define FIRST_32_SECOND_64(a, b) (a)
#endif
// A low-level vector based on mmap. May incur a significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVector {
public:
explicit InternalMmapVector(uptr initial_capacity) {
CHECK_GT(initial_capacity, 0);
capacity_ = initial_capacity;
size_ = 0;
data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
}
~InternalMmapVector() {
UnmapOrDie(data_, capacity_ * sizeof(T));
}
T &operator[](uptr i) {
CHECK_LT(i, size_);
return data_[i];
}
const T &operator[](uptr i) const {
CHECK_LT(i, size_);
return data_[i];
}
void push_back(const T &element) {
CHECK_LE(size_, capacity_);
if (size_ == capacity_) {
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
Resize(new_capacity);
}
data_[size_++] = element;
}
T &back() {
CHECK_GT(size_, 0);
return data_[size_ - 1];
}
void pop_back() {
CHECK_GT(size_, 0);
size_--;
}
uptr size() const {
return size_;
}
const T *data() const {
return data_;
}
uptr capacity() const {
return capacity_;
}
private:
void Resize(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
CHECK_LE(size_, new_capacity);
T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
"InternalMmapVector");
internal_memcpy(new_data, data_, size_ * sizeof(T));
T *old_data = data_;
data_ = new_data;
UnmapOrDie(old_data, capacity_ * sizeof(T));
capacity_ = new_capacity;
}
// Disallow evil constructors.
InternalMmapVector(const InternalMmapVector&);
void operator=(const InternalMmapVector&);
T *data_;
uptr capacity_;
uptr size_;
};
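A minimal usage sketch; the backing store is mmap-ed and the capacity grows to the next power of two on an overflowing push_back:
InternalMmapVector<uptr> v(/*initial_capacity=*/4);
v.push_back(0x1000);
v.push_back(0x2000);
uptr last = v.back();  // 0x2000
v.pop_back();          // size() is 1 again; the capacity is unchanged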
// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
if (size < 2)
return;
// Stage 1: insert elements to the heap.
for (uptr i = 1; i < size; i++) {
uptr j, p;
for (j = i; j > 0; j = p) {
p = (j - 1) / 2;
if (comp((*v)[p], (*v)[j]))
Swap((*v)[j], (*v)[p]);
else
break;
}
}
// Stage 2: swap largest element with the last one,
// and sink the new top.
for (uptr i = size - 1; i > 0; i--) {
Swap((*v)[0], (*v)[i]);
uptr j, max_ind;
for (j = 0; j < i; j = max_ind) {
uptr left = 2 * j + 1;
uptr right = 2 * j + 2;
max_ind = j;
if (left < i && comp((*v)[max_ind], (*v)[left]))
max_ind = left;
if (right < i && comp((*v)[max_ind], (*v)[right]))
max_ind = right;
if (max_ind != j)
Swap((*v)[j], (*v)[max_ind]);
else
break;
}
}
}
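A usage sketch matching how SortArray() above now delegates to this template (the comparator is assumed):
static bool UptrLess(const uptr &a, const uptr &b) { return a < b; }

uptr data[] = {3, 1, 2};
uptr *p = data;                 // Container = uptr*, so (*v)[i] indexes data
InternalSort(&p, 3, UptrLess);  // data is now {1, 2, 3}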
template<class Container, class Value, class Compare>
uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
const Value &val, Compare comp) {
uptr not_found = last + 1;
while (last >= first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
first = mid + 1;
else if (comp(val, v[mid]))
last = mid - 1;
else
return mid;
}
return not_found;
}
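A usage sketch (comparator assumed); [first, last] is an inclusive range of a container already sorted by comp, and "not found" is reported as last + 1:
static bool UptrLess(const uptr &a, const uptr &b) { return a < b; }

uptr sorted[] = {10, 20, 30};
uptr hit = InternalBinarySearch(sorted, 0, 2, (uptr)20, UptrLess);   // 1
uptr miss = InternalBinarySearch(sorted, 0, 2, (uptr)25, UptrLess);  // 3 (last + 1)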
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
public:
LoadedModule(const char *module_name, uptr base_address);
void addAddressRange(uptr beg, uptr end);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
private:
struct AddressRange {
uptr beg;
uptr end;
};
char *full_name_;
uptr base_address_;
static const uptr kMaxNumberOfAddressRanges = 6;
AddressRange ranges_[kMaxNumberOfAddressRanges];
uptr n_ranges_;
};
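A usage sketch (path and addresses assumed, 64-bit target):
LoadedModule module("/lib/libfoo.so", 0x7f0000000000ULL);
module.addAddressRange(0x7f0000000000ULL, 0x7f0000010000ULL);
bool inside = module.containsAddress(0x7f0000000800ULL);  // true
const char *name = module.full_name();                    // "/lib/libfoo.so"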
// OS-dependent function that fills array with descriptions of at most
// "max_modules" currently loaded modules. Returns the number of
// initialized modules. If filter is nonzero, ignores modules for which
// filter(full_name) is false.
typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter);
} // namespace __sanitizer
#endif // SANITIZER_COMMON_H

File diff suppressed because it is too large.


@ -0,0 +1,566 @@
//===-- sanitizer_common_interceptors_ioctl.inc -----------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Ioctl handling in common sanitizer interceptors.
//===----------------------------------------------------------------------===//
#include "sanitizer_flags.h"
struct ioctl_desc {
unsigned req;
// FIXME: support read+write arguments. Those are currently marked as WRITE.
enum {
NONE,
READ,
WRITE,
CUSTOM
} type : 2;
unsigned size : 30;
const char* name;
};
const unsigned ioctl_table_max = 500;
static ioctl_desc ioctl_table[ioctl_table_max];
static unsigned ioctl_table_size = 0;
// This cannot be declared as a global, because references to struct_*_sz
// require a global initializer. And this table must be available before global
// initializers are run.
static void ioctl_table_fill() {
#define _(rq, tp, sz) \
if (IOCTL_##rq != IOCTL_NOT_PRESENT) { \
CHECK(ioctl_table_size < ioctl_table_max); \
ioctl_table[ioctl_table_size].req = IOCTL_##rq; \
ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \
ioctl_table[ioctl_table_size].size = sz; \
ioctl_table[ioctl_table_size].name = #rq; \
++ioctl_table_size; \
}
_(FIOASYNC, READ, sizeof(int));
_(FIOCLEX, NONE, 0);
_(FIOGETOWN, WRITE, sizeof(int));
_(FIONBIO, READ, sizeof(int));
_(FIONCLEX, NONE, 0);
_(FIOSETOWN, READ, sizeof(int));
_(SIOCADDMULTI, READ, struct_ifreq_sz);
_(SIOCATMARK, WRITE, sizeof(int));
_(SIOCDELMULTI, READ, struct_ifreq_sz);
_(SIOCGIFADDR, WRITE, struct_ifreq_sz);
_(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
_(SIOCGIFCONF, CUSTOM, 0);
_(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
_(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
_(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
_(SIOCGIFMTU, WRITE, struct_ifreq_sz);
_(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
_(SIOCGPGRP, WRITE, sizeof(int));
_(SIOCSIFADDR, READ, struct_ifreq_sz);
_(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
_(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
_(SIOCSIFFLAGS, READ, struct_ifreq_sz);
_(SIOCSIFMETRIC, READ, struct_ifreq_sz);
_(SIOCSIFMTU, READ, struct_ifreq_sz);
_(SIOCSIFNETMASK, READ, struct_ifreq_sz);
_(SIOCSPGRP, READ, sizeof(int));
_(TIOCCONS, NONE, 0);
_(TIOCEXCL, NONE, 0);
_(TIOCGETD, WRITE, sizeof(int));
_(TIOCGPGRP, WRITE, pid_t_sz);
_(TIOCGWINSZ, WRITE, struct_winsize_sz);
_(TIOCMBIC, READ, sizeof(int));
_(TIOCMBIS, READ, sizeof(int));
_(TIOCMGET, WRITE, sizeof(int));
_(TIOCMSET, READ, sizeof(int));
_(TIOCNOTTY, NONE, 0);
_(TIOCNXCL, NONE, 0);
_(TIOCOUTQ, WRITE, sizeof(int));
_(TIOCPKT, READ, sizeof(int));
_(TIOCSCTTY, NONE, 0);
_(TIOCSETD, READ, sizeof(int));
_(TIOCSPGRP, READ, pid_t_sz);
_(TIOCSTI, READ, sizeof(char));
_(TIOCSWINSZ, READ, struct_winsize_sz);
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_MAC
_(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
_(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
#endif
#if SANITIZER_LINUX
// Conflicting request ids.
// _(CDROMAUDIOBUFSIZ, NONE, 0);
// _(SNDCTL_TMR_CONTINUE, NONE, 0);
// _(SNDCTL_TMR_START, NONE, 0);
// _(SNDCTL_TMR_STOP, NONE, 0);
// _(SOUND_MIXER_READ_LOUD, WRITE, sizeof(int)); // same as ...READ_ENHANCE
// _(SOUND_MIXER_READ_MUTE, WRITE, sizeof(int)); // same as ...READ_ENHANCE
// _(SOUND_MIXER_WRITE_LOUD, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
// _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
_(BLKFLSBUF, NONE, 0);
_(BLKGETSIZE, WRITE, sizeof(uptr));
_(BLKRAGET, WRITE, sizeof(int));
_(BLKRASET, NONE, 0);
_(BLKROGET, WRITE, sizeof(int));
_(BLKROSET, READ, sizeof(int));
_(BLKRRPART, NONE, 0);
_(CDROMEJECT, NONE, 0);
_(CDROMEJECT_SW, NONE, 0);
_(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);
_(CDROMPAUSE, NONE, 0);
_(CDROMPLAYMSF, READ, struct_cdrom_msf_sz);
_(CDROMPLAYTRKIND, READ, struct_cdrom_ti_sz);
_(CDROMREADAUDIO, READ, struct_cdrom_read_audio_sz);
_(CDROMREADCOOKED, READ, struct_cdrom_msf_sz);
_(CDROMREADMODE1, READ, struct_cdrom_msf_sz);
_(CDROMREADMODE2, READ, struct_cdrom_msf_sz);
_(CDROMREADRAW, READ, struct_cdrom_msf_sz);
_(CDROMREADTOCENTRY, WRITE, struct_cdrom_tocentry_sz);
_(CDROMREADTOCHDR, WRITE, struct_cdrom_tochdr_sz);
_(CDROMRESET, NONE, 0);
_(CDROMRESUME, NONE, 0);
_(CDROMSEEK, READ, struct_cdrom_msf_sz);
_(CDROMSTART, NONE, 0);
_(CDROMSTOP, NONE, 0);
_(CDROMSUBCHNL, WRITE, struct_cdrom_subchnl_sz);
_(CDROMVOLCTRL, READ, struct_cdrom_volctrl_sz);
_(CDROMVOLREAD, WRITE, struct_cdrom_volctrl_sz);
_(CDROM_GET_UPC, WRITE, 8);
_(EVIOCGABS, WRITE, struct_input_absinfo_sz); // fixup
_(EVIOCGBIT, WRITE, struct_input_id_sz); // fixup
_(EVIOCGEFFECTS, WRITE, sizeof(int));
_(EVIOCGID, WRITE, struct_input_id_sz);
_(EVIOCGKEY, WRITE, 0);
_(EVIOCGKEYCODE, WRITE, sizeof(int) * 2);
_(EVIOCGLED, WRITE, 0);
_(EVIOCGNAME, WRITE, 0);
_(EVIOCGPHYS, WRITE, 0);
_(EVIOCGRAB, READ, sizeof(int));
_(EVIOCGREP, WRITE, sizeof(int) * 2);
_(EVIOCGSND, WRITE, 0);
_(EVIOCGSW, WRITE, 0);
_(EVIOCGUNIQ, WRITE, 0);
_(EVIOCGVERSION, WRITE, sizeof(int));
_(EVIOCRMFF, READ, sizeof(int));
_(EVIOCSABS, READ, struct_input_absinfo_sz); // fixup
_(EVIOCSFF, READ, struct_ff_effect_sz);
_(EVIOCSKEYCODE, READ, sizeof(int) * 2);
_(EVIOCSREP, READ, sizeof(int) * 2);
_(FDCLRPRM, NONE, 0);
_(FDDEFPRM, READ, struct_floppy_struct_sz);
_(FDFLUSH, NONE, 0);
_(FDFMTBEG, NONE, 0);
_(FDFMTEND, NONE, 0);
_(FDFMTTRK, READ, struct_format_descr_sz);
_(FDGETDRVPRM, WRITE, struct_floppy_drive_params_sz);
_(FDGETDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
_(FDGETDRVTYP, WRITE, 16);
_(FDGETFDCSTAT, WRITE, struct_floppy_fdc_state_sz);
_(FDGETMAXERRS, WRITE, struct_floppy_max_errors_sz);
_(FDGETPRM, WRITE, struct_floppy_struct_sz);
_(FDMSGOFF, NONE, 0);
_(FDMSGON, NONE, 0);
_(FDPOLLDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
_(FDRAWCMD, WRITE, struct_floppy_raw_cmd_sz);
_(FDRESET, NONE, 0);
_(FDSETDRVPRM, READ, struct_floppy_drive_params_sz);
_(FDSETEMSGTRESH, NONE, 0);
_(FDSETMAXERRS, READ, struct_floppy_max_errors_sz);
_(FDSETPRM, READ, struct_floppy_struct_sz);
_(FDTWADDLE, NONE, 0);
_(FDWERRORCLR, NONE, 0);
_(FDWERRORGET, WRITE, struct_floppy_write_errors_sz);
_(HDIO_DRIVE_CMD, WRITE, sizeof(int));
_(HDIO_GETGEO, WRITE, struct_hd_geometry_sz);
_(HDIO_GET_32BIT, WRITE, sizeof(int));
_(HDIO_GET_DMA, WRITE, sizeof(int));
_(HDIO_GET_IDENTITY, WRITE, struct_hd_driveid_sz);
_(HDIO_GET_KEEPSETTINGS, WRITE, sizeof(int));
_(HDIO_GET_MULTCOUNT, WRITE, sizeof(int));
_(HDIO_GET_NOWERR, WRITE, sizeof(int));
_(HDIO_GET_UNMASKINTR, WRITE, sizeof(int));
_(HDIO_SET_32BIT, NONE, 0);
_(HDIO_SET_DMA, NONE, 0);
_(HDIO_SET_KEEPSETTINGS, NONE, 0);
_(HDIO_SET_MULTCOUNT, NONE, 0);
_(HDIO_SET_NOWERR, NONE, 0);
_(HDIO_SET_UNMASKINTR, NONE, 0);
_(MTIOCGET, WRITE, struct_mtget_sz);
_(MTIOCPOS, WRITE, struct_mtpos_sz);
_(MTIOCTOP, READ, struct_mtop_sz);
_(PPPIOCGASYNCMAP, WRITE, sizeof(int));
_(PPPIOCGDEBUG, WRITE, sizeof(int));
_(PPPIOCGFLAGS, WRITE, sizeof(int));
_(PPPIOCGUNIT, WRITE, sizeof(int));
_(PPPIOCGXASYNCMAP, WRITE, sizeof(int) * 8);
_(PPPIOCSASYNCMAP, READ, sizeof(int));
_(PPPIOCSDEBUG, READ, sizeof(int));
_(PPPIOCSFLAGS, READ, sizeof(int));
_(PPPIOCSMAXCID, READ, sizeof(int));
_(PPPIOCSMRU, READ, sizeof(int));
_(PPPIOCSXASYNCMAP, READ, sizeof(int) * 8);
_(SIOCADDRT, READ, struct_rtentry_sz);
_(SIOCDARP, READ, struct_arpreq_sz);
_(SIOCDELRT, READ, struct_rtentry_sz);
_(SIOCDRARP, READ, struct_arpreq_sz);
_(SIOCGARP, WRITE, struct_arpreq_sz);
_(SIOCGIFENCAP, WRITE, sizeof(int));
_(SIOCGIFHWADDR, WRITE, struct_ifreq_sz);
_(SIOCGIFMAP, WRITE, struct_ifreq_sz);
_(SIOCGIFMEM, WRITE, struct_ifreq_sz);
_(SIOCGIFNAME, NONE, 0);
_(SIOCGIFSLAVE, NONE, 0);
_(SIOCGRARP, WRITE, struct_arpreq_sz);
_(SIOCGSTAMP, WRITE, timeval_sz);
_(SIOCSARP, READ, struct_arpreq_sz);
_(SIOCSIFENCAP, READ, sizeof(int));
_(SIOCSIFHWADDR, READ, struct_ifreq_sz);
_(SIOCSIFLINK, NONE, 0);
_(SIOCSIFMAP, READ, struct_ifreq_sz);
_(SIOCSIFMEM, READ, struct_ifreq_sz);
_(SIOCSIFSLAVE, NONE, 0);
_(SIOCSRARP, READ, struct_arpreq_sz);
_(SNDCTL_COPR_HALT, WRITE, struct_copr_debug_buf_sz);
_(SNDCTL_COPR_LOAD, READ, struct_copr_buffer_sz);
_(SNDCTL_COPR_RCODE, WRITE, struct_copr_debug_buf_sz);
_(SNDCTL_COPR_RCVMSG, WRITE, struct_copr_msg_sz);
_(SNDCTL_COPR_RDATA, WRITE, struct_copr_debug_buf_sz);
_(SNDCTL_COPR_RESET, NONE, 0);
_(SNDCTL_COPR_RUN, WRITE, struct_copr_debug_buf_sz);
_(SNDCTL_COPR_SENDMSG, READ, struct_copr_msg_sz);
_(SNDCTL_COPR_WCODE, READ, struct_copr_debug_buf_sz);
_(SNDCTL_COPR_WDATA, READ, struct_copr_debug_buf_sz);
_(SNDCTL_DSP_GETBLKSIZE, WRITE, sizeof(int));
_(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));
_(SNDCTL_DSP_NONBLOCK, NONE, 0);
_(SNDCTL_DSP_POST, NONE, 0);
_(SNDCTL_DSP_RESET, NONE, 0);
_(SNDCTL_DSP_SETFMT, WRITE, sizeof(int));
_(SNDCTL_DSP_SETFRAGMENT, WRITE, sizeof(int));
_(SNDCTL_DSP_SPEED, WRITE, sizeof(int));
_(SNDCTL_DSP_STEREO, WRITE, sizeof(int));
_(SNDCTL_DSP_SUBDIVIDE, WRITE, sizeof(int));
_(SNDCTL_DSP_SYNC, NONE, 0);
_(SNDCTL_FM_4OP_ENABLE, READ, sizeof(int));
_(SNDCTL_FM_LOAD_INSTR, READ, struct_sbi_instrument_sz);
_(SNDCTL_MIDI_INFO, WRITE, struct_midi_info_sz);
_(SNDCTL_MIDI_PRETIME, WRITE, sizeof(int));
_(SNDCTL_SEQ_CTRLRATE, WRITE, sizeof(int));
_(SNDCTL_SEQ_GETINCOUNT, WRITE, sizeof(int));
_(SNDCTL_SEQ_GETOUTCOUNT, WRITE, sizeof(int));
_(SNDCTL_SEQ_NRMIDIS, WRITE, sizeof(int));
_(SNDCTL_SEQ_NRSYNTHS, WRITE, sizeof(int));
_(SNDCTL_SEQ_OUTOFBAND, READ, struct_seq_event_rec_sz);
_(SNDCTL_SEQ_PANIC, NONE, 0);
_(SNDCTL_SEQ_PERCMODE, NONE, 0);
_(SNDCTL_SEQ_RESET, NONE, 0);
_(SNDCTL_SEQ_RESETSAMPLES, READ, sizeof(int));
_(SNDCTL_SEQ_SYNC, NONE, 0);
_(SNDCTL_SEQ_TESTMIDI, READ, sizeof(int));
_(SNDCTL_SEQ_THRESHOLD, READ, sizeof(int));
_(SNDCTL_SYNTH_INFO, WRITE, struct_synth_info_sz);
_(SNDCTL_SYNTH_MEMAVL, WRITE, sizeof(int));
_(SNDCTL_TMR_METRONOME, READ, sizeof(int));
_(SNDCTL_TMR_SELECT, WRITE, sizeof(int));
_(SNDCTL_TMR_SOURCE, WRITE, sizeof(int));
_(SNDCTL_TMR_TEMPO, WRITE, sizeof(int));
_(SNDCTL_TMR_TIMEBASE, WRITE, sizeof(int));
_(SOUND_MIXER_READ_ALTPCM, WRITE, sizeof(int));
_(SOUND_MIXER_READ_BASS, WRITE, sizeof(int));
_(SOUND_MIXER_READ_CAPS, WRITE, sizeof(int));
_(SOUND_MIXER_READ_CD, WRITE, sizeof(int));
_(SOUND_MIXER_READ_DEVMASK, WRITE, sizeof(int));
_(SOUND_MIXER_READ_ENHANCE, WRITE, sizeof(int));
_(SOUND_MIXER_READ_IGAIN, WRITE, sizeof(int));
_(SOUND_MIXER_READ_IMIX, WRITE, sizeof(int));
_(SOUND_MIXER_READ_LINE, WRITE, sizeof(int));
_(SOUND_MIXER_READ_LINE1, WRITE, sizeof(int));
_(SOUND_MIXER_READ_LINE2, WRITE, sizeof(int));
_(SOUND_MIXER_READ_LINE3, WRITE, sizeof(int));
_(SOUND_MIXER_READ_MIC, WRITE, sizeof(int));
_(SOUND_MIXER_READ_OGAIN, WRITE, sizeof(int));
_(SOUND_MIXER_READ_PCM, WRITE, sizeof(int));
_(SOUND_MIXER_READ_RECLEV, WRITE, sizeof(int));
_(SOUND_MIXER_READ_RECMASK, WRITE, sizeof(int));
_(SOUND_MIXER_READ_RECSRC, WRITE, sizeof(int));
_(SOUND_MIXER_READ_SPEAKER, WRITE, sizeof(int));
_(SOUND_MIXER_READ_STEREODEVS, WRITE, sizeof(int));
_(SOUND_MIXER_READ_SYNTH, WRITE, sizeof(int));
_(SOUND_MIXER_READ_TREBLE, WRITE, sizeof(int));
_(SOUND_MIXER_READ_VOLUME, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_ALTPCM, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_BASS, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_CD, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_ENHANCE, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_IGAIN, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_IMIX, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_LINE, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_LINE1, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_LINE2, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_LINE3, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_MIC, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_OGAIN, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_PCM, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_RECLEV, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_RECSRC, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_SPEAKER, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_SYNTH, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_TREBLE, WRITE, sizeof(int));
_(SOUND_MIXER_WRITE_VOLUME, WRITE, sizeof(int));
_(SOUND_PCM_READ_BITS, WRITE, sizeof(int));
_(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));
_(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));
_(SOUND_PCM_READ_RATE, WRITE, sizeof(int));
_(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
_(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
_(TCFLSH, NONE, 0);
_(TCGETA, WRITE, struct_termio_sz);
_(TCGETS, WRITE, struct_termios_sz);
_(TCSBRK, NONE, 0);
_(TCSBRKP, NONE, 0);
_(TCSETA, READ, struct_termio_sz);
_(TCSETAF, READ, struct_termio_sz);
_(TCSETAW, READ, struct_termio_sz);
_(TCSETS, READ, struct_termios_sz);
_(TCSETSF, READ, struct_termios_sz);
_(TCSETSW, READ, struct_termios_sz);
_(TCXONC, NONE, 0);
_(TIOCGLCKTRMIOS, WRITE, struct_termios_sz);
_(TIOCGSOFTCAR, WRITE, sizeof(int));
_(TIOCINQ, WRITE, sizeof(int));
_(TIOCLINUX, READ, sizeof(char));
_(TIOCSERCONFIG, NONE, 0);
_(TIOCSERGETLSR, WRITE, sizeof(int));
_(TIOCSERGWILD, WRITE, sizeof(int));
_(TIOCSERSWILD, READ, sizeof(int));
_(TIOCSLCKTRMIOS, READ, struct_termios_sz);
_(TIOCSSOFTCAR, READ, sizeof(int));
_(VT_ACTIVATE, NONE, 0);
_(VT_DISALLOCATE, NONE, 0);
_(VT_GETMODE, WRITE, struct_vt_mode_sz);
_(VT_GETSTATE, WRITE, struct_vt_stat_sz);
_(VT_OPENQRY, WRITE, sizeof(int));
_(VT_RELDISP, NONE, 0);
_(VT_RESIZE, READ, struct_vt_sizes_sz);
_(VT_RESIZEX, READ, struct_vt_consize_sz);
_(VT_SENDSIG, NONE, 0);
_(VT_SETMODE, READ, struct_vt_mode_sz);
_(VT_WAITACTIVE, NONE, 0);
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
// _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
_(CYGETDEFTHRESH, WRITE, sizeof(int));
_(CYGETDEFTIMEOUT, WRITE, sizeof(int));
_(CYGETMON, WRITE, struct_cyclades_monitor_sz);
_(CYGETTHRESH, WRITE, sizeof(int));
_(CYGETTIMEOUT, WRITE, sizeof(int));
_(CYSETDEFTHRESH, NONE, 0);
_(CYSETDEFTIMEOUT, NONE, 0);
_(CYSETTHRESH, NONE, 0);
_(CYSETTIMEOUT, NONE, 0);
_(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
_(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
_(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);
_(EQL_GETSLAVECFG, WRITE, struct_ifreq_sz);
_(EQL_SETMASTRCFG, WRITE, struct_ifreq_sz);
_(EQL_SETSLAVECFG, WRITE, struct_ifreq_sz);
_(EVIOCGKEYCODE_V2, WRITE, struct_input_keymap_entry_sz);
_(EVIOCGPROP, WRITE, 0);
_(EVIOCSKEYCODE_V2, READ, struct_input_keymap_entry_sz);
_(FS_IOC_GETFLAGS, WRITE, sizeof(int));
_(FS_IOC_GETVERSION, WRITE, sizeof(int));
_(FS_IOC_SETFLAGS, READ, sizeof(int));
_(FS_IOC_SETVERSION, READ, sizeof(int));
_(GIO_CMAP, WRITE, 48);
_(GIO_FONT, WRITE, 8192);
_(GIO_SCRNMAP, WRITE, e_tabsz);
_(GIO_UNIMAP, WRITE, struct_unimapdesc_sz);
_(GIO_UNISCRNMAP, WRITE, sizeof(short) * e_tabsz);
_(KDADDIO, NONE, 0);
_(KDDELIO, NONE, 0);
_(KDDISABIO, NONE, 0);
_(KDENABIO, NONE, 0);
_(KDGETKEYCODE, WRITE, struct_kbkeycode_sz);
_(KDGETLED, WRITE, 1);
_(KDGETMODE, WRITE, sizeof(int));
_(KDGKBDIACR, WRITE, struct_kbdiacrs_sz);
_(KDGKBENT, WRITE, struct_kbentry_sz);
_(KDGKBLED, WRITE, sizeof(int));
_(KDGKBMETA, WRITE, sizeof(int));
_(KDGKBMODE, WRITE, sizeof(int));
_(KDGKBSENT, WRITE, struct_kbsentry_sz);
_(KDGKBTYPE, WRITE, 1);
_(KDMAPDISP, NONE, 0);
_(KDMKTONE, NONE, 0);
_(KDSETKEYCODE, READ, struct_kbkeycode_sz);
_(KDSETLED, NONE, 0);
_(KDSETMODE, NONE, 0);
_(KDSIGACCEPT, NONE, 0);
_(KDSKBDIACR, READ, struct_kbdiacrs_sz);
_(KDSKBENT, READ, struct_kbentry_sz);
_(KDSKBLED, NONE, 0);
_(KDSKBMETA, NONE, 0);
_(KDSKBMODE, NONE, 0);
_(KDSKBSENT, READ, struct_kbsentry_sz);
_(KDUNMAPDISP, NONE, 0);
_(KIOCSOUND, NONE, 0);
_(LPABORT, NONE, 0);
_(LPABORTOPEN, NONE, 0);
_(LPCAREFUL, NONE, 0);
_(LPCHAR, NONE, 0);
_(LPGETIRQ, WRITE, sizeof(int));
_(LPGETSTATUS, WRITE, sizeof(int));
_(LPRESET, NONE, 0);
_(LPSETIRQ, NONE, 0);
_(LPTIME, NONE, 0);
_(LPWAIT, NONE, 0);
_(MTIOCGETCONFIG, WRITE, struct_mtconfiginfo_sz);
_(MTIOCSETCONFIG, READ, struct_mtconfiginfo_sz);
_(PIO_CMAP, NONE, 0);
_(PIO_FONT, READ, 8192);
_(PIO_SCRNMAP, READ, e_tabsz);
_(PIO_UNIMAP, READ, struct_unimapdesc_sz);
_(PIO_UNIMAPCLR, READ, struct_unimapinit_sz);
_(PIO_UNISCRNMAP, READ, sizeof(short) * e_tabsz);
_(SCSI_IOCTL_PROBE_HOST, READ, sizeof(int));
_(SCSI_IOCTL_TAGGED_DISABLE, NONE, 0);
_(SCSI_IOCTL_TAGGED_ENABLE, NONE, 0);
_(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);
_(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);
_(TIOCGSERIAL, WRITE, struct_serial_struct_sz);
_(TIOCSERGETMULTI, WRITE, struct_serial_multiport_struct_sz);
_(TIOCSERSETMULTI, READ, struct_serial_multiport_struct_sz);
_(TIOCSSERIAL, READ, struct_serial_struct_sz);
// The following ioctl requests are shared between AX25, IPX, netrom and
// mrouted.
// _(SIOCAIPXITFCRT, READ, sizeof(char));
// _(SIOCAX25GETUID, READ, struct_sockaddr_ax25_sz);
// _(SIOCNRGETPARMS, WRITE, struct_nr_parms_struct_sz);
// _(SIOCAIPXPRISLT, READ, sizeof(char));
// _(SIOCNRSETPARMS, READ, struct_nr_parms_struct_sz);
// _(SIOCAX25ADDUID, READ, struct_sockaddr_ax25_sz);
// _(SIOCNRDECOBS, NONE, 0);
// _(SIOCAX25DELUID, READ, struct_sockaddr_ax25_sz);
// _(SIOCIPXCFGDATA, WRITE, struct_ipx_config_data_sz);
// _(SIOCAX25NOUID, READ, sizeof(int));
// _(SIOCNRRTCTL, READ, sizeof(int));
// _(SIOCAX25DIGCTL, READ, sizeof(int));
// _(SIOCAX25GETPARMS, WRITE, struct_ax25_parms_struct_sz);
// _(SIOCAX25SETPARMS, READ, struct_ax25_parms_struct_sz);
#endif
#undef _
}
static bool ioctl_initialized = false;
struct ioctl_desc_compare {
bool operator()(const ioctl_desc& left, const ioctl_desc& right) const {
return left.req < right.req;
}
};
static void ioctl_init() {
ioctl_table_fill();
InternalSort(&ioctl_table, ioctl_table_size, ioctl_desc_compare());
bool bad = false;
for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {
if (ioctl_table[i].req >= ioctl_table[i + 1].req) {
Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n",
ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,
ioctl_table[i + 1].name);
bad = true;
}
}
if (bad) Die();
ioctl_initialized = true;
}
// Handle the most evil ioctls that encode argument value as part of request id.
static unsigned ioctl_request_fixup(unsigned req) {
#if SANITIZER_LINUX
if ((req & ~0x3fff001fU) == IOCTL_EVIOCGBIT)
return IOCTL_EVIOCGBIT;
if ((req & ~0x3fU) == IOCTL_EVIOCGABS)
return IOCTL_EVIOCGABS;
if ((req & ~0x3fU) == IOCTL_EVIOCSABS)
return IOCTL_EVIOCSABS;
#endif
return req;
}
static const ioctl_desc *ioctl_table_lookup(unsigned req) {
int left = 0;
int right = ioctl_table_size;
while (left < right) {
int mid = (left + right) / 2;
if (ioctl_table[mid].req < req)
left = mid + 1;
else
right = mid;
}
if (left == right && ioctl_table[left].req == req)
return ioctl_table + left;
else
return 0;
}
static const ioctl_desc *ioctl_lookup(unsigned req) {
req = ioctl_request_fixup(req);
const ioctl_desc *desc = ioctl_table_lookup(req);
if (desc) return desc;
// Try stripping access size from the request id.
desc = ioctl_table_lookup(req & ~0x3fff0000U);
// Sanity check: requests that encode access size are either read or write and
// have size of 0 in the table.
if (desc && desc->size == 0 &&
(desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READ))
return desc;
return 0;
}
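A lookup sketch (the request value comes from the intercepted call; ioctl_init() must already have run so the table is sorted):
const ioctl_desc *desc = ioctl_lookup(request);
if (desc && desc->type == ioctl_desc::WRITE) {
  // ioctl_common_post() below will mark desc->size bytes of the argument
  // as written (or IOC_SIZE(request) bytes when desc->size is 0).
}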
static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
unsigned request, void *arg) {
if (desc->type == ioctl_desc::READ) {
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
}
if (desc->type != ioctl_desc::CUSTOM)
return;
switch (request) {
case 0x00008912: { // SIOCGIFCONF
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
break;
}
}
return;
}
static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
unsigned request, void *arg) {
if (desc->type == ioctl_desc::WRITE) {
// FIXME: add verbose output
unsigned size = desc->size ? desc->size : IOC_SIZE(request);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
}
if (desc->type != ioctl_desc::CUSTOM)
return;
switch (request) {
case 0x00008912: { // SIOCGIFCONF
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
break;
}
}
return;
}


@ -276,7 +276,7 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
CHECK_GT(n_inputs, 0);
const char *p = format;
while (*p && n_inputs) {
while (*p) {
ScanfDirective dir;
p = scanf_parse_next(p, allowGnuMalloc, &dir);
if (!p)
@ -299,6 +299,8 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
void *argp = va_arg(aq, void *);
if (dir.convSpecifier != 'n')
--n_inputs;
if (n_inputs < 0)
break;
if (size == SSS_STRLEN) {
size = internal_strlen((const char *)argp) + 1;
}
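The motivation for scanning past n_inputs, as a sketch: a %n directive does not consume a counted input, so under the old loop condition the trailing %n below would never be marked as written.
int x, n_read;
sscanf("123", "%d%n", &x, &n_read);
// %d consumes the only counted input (n_inputs drops to 0); %n must still
// be processed so the interceptor marks n_read as written.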


@ -0,0 +1,35 @@
//===-- sanitizer_common_libcdep.cc ---------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
namespace __sanitizer {
bool PrintsToTty() {
MaybeOpenReportFile();
return internal_isatty(report_fd) != 0;
}
bool PrintsToTtyCached() {
// FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
// printing on Windows.
if (SANITIZER_WINDOWS)
return 0;
static int cached = 0;
static bool prints_to_tty;
if (!cached) { // Not thread-safe.
prints_to_tty = PrintsToTty();
cached = 1;
}
return prints_to_tty;
}
} // namespace __sanitizer

File diff suppressed because it is too large.


@ -16,15 +16,40 @@
namespace __sanitizer {
CommonFlags common_flags_dont_use_directly;
void ParseCommonFlagsFromString(const char *str) {
CommonFlags *f = common_flags();
ParseFlag(str, &f->malloc_context_size, "malloc_context_size");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->symbolize, "symbolize");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl");
ParseFlag(str, &f->log_path, "log_path");
ParseFlag(str, &f->detect_leaks, "detect_leaks");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null");
}
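Tools invoke this with their options string; a sketch (flag values assumed):
ParseCommonFlagsFromString("detect_leaks=1:malloc_context_size=20:log_path=/tmp/san");
// Afterwards common_flags()->detect_leaks is true and
// common_flags()->malloc_context_size is 20.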
static bool GetFlagValue(const char *env, const char *name,
const char **value, int *value_length) {
if (env == 0)
return false;
const char *pos = internal_strstr(env, name);
const char *end;
if (pos == 0)
return false;
const char *pos = 0;
for (;;) {
pos = internal_strstr(env, name);
if (pos == 0)
return false;
if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) {
// Seems to be middle of another flag name or value.
env = pos + 1;
continue;
}
break;
}
pos += internal_strlen(name);
const char *end;
if (pos[0] != '=') {
end = pos;
} else {
@ -75,7 +100,7 @@ void ParseFlag(const char *env, int *flag, const char *name) {
int value_length;
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = internal_atoll(value);
*flag = static_cast<int>(internal_atoll(value));
}
static LowLevelAllocator allocator_for_flags;
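The effect of the new scanning loop in GetFlagValue above, as a sketch: a hit that is merely the tail of a longer flag name is rejected, because the preceding character belongs to that other name.
bool symbolize = false;
ParseFlag("my_symbolize=0:symbolize=1", &symbolize, "symbolize");
// The match inside "my_symbolize" is skipped (it is preceded by '_'),
// so the real "symbolize=1" is used and symbolize becomes true.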


@ -20,6 +20,41 @@ void ParseFlag(const char *env, bool *flag, const char *name);
void ParseFlag(const char *env, int *flag, const char *name);
void ParseFlag(const char *env, const char **flag, const char *name);
struct CommonFlags {
// If set, use the online symbolizer from common sanitizer runtime.
bool symbolize;
// Path to external symbolizer.
const char *external_symbolizer_path;
// Strips this prefix from file paths in error reports.
const char *strip_path_prefix;
// Use fast (frame-pointer-based) unwinder on fatal errors (if available).
bool fast_unwind_on_fatal;
// Use fast (frame-pointer-based) unwinder on malloc/free (if available).
bool fast_unwind_on_malloc;
// Intercept and handle ioctl requests.
bool handle_ioctl;
// Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// Write logs to "log_path.pid" instead of stderr.
const char *log_path;
// Enable memory leak detection.
bool detect_leaks;
// Invoke leak checking in an atexit handler. Has no effect if
// detect_leaks=false, or if __lsan_do_leak_check() is called before the
// handler has a chance to run.
bool leak_check_at_exit;
// If false, the allocator will crash instead of returning 0 on out-of-memory.
bool allocator_may_return_null;
};
extern CommonFlags common_flags_dont_use_directly;
inline CommonFlags *common_flags() {
return &common_flags_dont_use_directly;
}
void ParseCommonFlagsFromString(const char *str);
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H


@ -11,9 +11,12 @@
#ifndef SANITIZER_DEFS_H
#define SANITIZER_DEFS_H
#if defined(_WIN32)
// FIXME find out what we need on Windows. __declspec(dllexport) ?
# define SANITIZER_INTERFACE_ATTRIBUTE
#include "sanitizer_platform.h"
// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
# define SANITIZER_WEAK_ATTRIBUTE
#elif defined(SANITIZER_GO)
# define SANITIZER_INTERFACE_ATTRIBUTE
@ -23,7 +26,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
#ifdef __linux__
#if SANITIZER_LINUX && !defined(SANITIZER_GO)
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@ -64,29 +67,39 @@ typedef signed int s32;
typedef signed long long s64; // NOLINT
typedef int fd_t;
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64.
// Mac and Linux/x86-64 are special.
#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;
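To make the OFF_T comment concrete: on the two "special" platforms the syscall ABI takes a 64-bit offset, so a hypothetical compile-time check (not part of this patch) of that invariant would be:

#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
typedef char off_t_is_64_bit_check[sizeof(OFF_T) == 8 ? 1 : -1];  // illustrative
#endif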
} // namespace __sanitizer
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the given file descriptor
// instead of stderr.
void __sanitizer_set_report_fd(int fd)
SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(int fd);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
// that the tools may call to bypass the sandbox.
void __sanitizer_sandbox_on_notify(void *reserved)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_sandbox_on_notify(void *reserved);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
void __sanitizer_report_error_summary(const char *error_summary)
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
} // extern "C"
@ -95,13 +108,13 @@ using namespace __sanitizer; // NOLINT
// This header should NOT include any other headers to avoid portability issues.
// Common defs.
#define INLINE static inline
#define INLINE inline
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#define WEAK SANITIZER_WEAK_ATTRIBUTE
// Platform-specific defs.
#if defined(_MSC_VER)
# define ALWAYS_INLINE __declspec(forceinline)
# define ALWAYS_INLINE __forceinline
// FIXME(timurrrr): do we need this on Windows?
# define ALIAS(x)
# define ALIGNED(x) __declspec(align(x))
@ -116,8 +129,10 @@ using namespace __sanitizer; // NOLINT
# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE __attribute__((always_inline))
# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
// Please only use the ALIGNED macro before the type.
// Using ALIGNED after the variable declaration is not portable!
# define ALIGNED(x) __attribute__((aligned(x)))
# define FORMAT(f, a) __attribute__((format(printf, f, a)))
# define NOINLINE __attribute__((noinline))
@ -136,7 +151,15 @@ using namespace __sanitizer; // NOLINT
# endif
#endif // _MSC_VER
#if defined(_WIN32)
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
typedef ALIGNED(1) u64 uu64;
typedef ALIGNED(1) s16 us16;
typedef ALIGNED(1) s32 us32;
typedef ALIGNED(1) s64 us64;
#if SANITIZER_WINDOWS
typedef unsigned long DWORD; // NOLINT
typedef DWORD thread_return_t;
# define THREAD_CALLING_CONV __stdcall
@ -155,6 +178,9 @@ typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
// NOTE: Functions below must be defined in each run-time.
namespace __sanitizer {
void NORETURN Die();
// FIXME: No, this shouldn't be in the sanitizer interface.
SANITIZER_INTERFACE_ATTRIBUTE
void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
} // namespace __sanitizer
@ -259,10 +285,12 @@ extern "C" void* _ReturnAddress(void);
# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
#endif
#define HANDLE_EINTR(res, f) { \
do { \
res = (f); \
} while (res == -1 && errno == EINTR); \
#define HANDLE_EINTR(res, f) \
{ \
int rverrno; \
do { \
res = (f); \
} while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
}
#endif // SANITIZER_DEFS_H
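The HANDLE_EINTR rewrite above is needed because raw syscalls never touch libc errno; failure is encoded in the return value itself and must be decoded via internal_iserror(). A hedged usage sketch, mirroring internal_read() later in this merge:

uptr res;
HANDLE_EINTR(res, internal_syscall(__NR_read, fd, buf, count));
if (internal_iserror(res))  // on Linux, res carries -errno on failure
  Report("read failed\n");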


@ -8,6 +8,7 @@
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@ -122,6 +123,13 @@ char* internal_strchr(const char *s, int c) {
}
}
char *internal_strchrnul(const char *s, int c) {
char *res = internal_strchr(s, c);
if (!res)
res = (char*)s + internal_strlen(s);
return res;
}
char *internal_strrchr(const char *s, int c) {
const char *res = 0;
for (uptr i = 0; s[i]; i++) {
@ -149,8 +157,7 @@ char *internal_strncpy(char *dst, const char *src, uptr n) {
uptr i;
for (i = 0; i < n && src[i]; i++)
dst[i] = src[i];
for (; i < n; i++)
dst[i] = '\0';
internal_memset(dst + i, '\0', n - i);
return dst;
}


@ -30,6 +30,7 @@ void *internal_memmove(void *dest, const void *src, uptr n);
// Should not be used in performance-critical places.
void *internal_memset(void *s, int c, uptr n);
char* internal_strchr(const char *s, int c);
char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
@ -51,36 +52,46 @@ bool mem_is_zero(const char *mem, uptr size);
// Memory
void *internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
int internal_munmap(void *addr, uptr length);
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
// I/O
const fd_t kInvalidFd = -1;
const fd_t kStdinFd = 0;
const fd_t kStdoutFd = 1;
const fd_t kStderrFd = 2;
int internal_close(fd_t fd);
uptr internal_close(fd_t fd);
int internal_isatty(fd_t fd);
// Use __sanitizer::OpenFile() instead.
fd_t internal_open(const char *filename, int flags);
fd_t internal_open(const char *filename, int flags, u32 mode);
uptr internal_open(const char *filename, int flags);
uptr internal_open(const char *filename, int flags, u32 mode);
uptr internal_read(fd_t fd, void *buf, uptr count);
uptr internal_write(fd_t fd, const void *buf, uptr count);
// OS
uptr internal_filesize(fd_t fd); // -1 on error.
int internal_stat(const char *path, void *buf);
int internal_lstat(const char *path, void *buf);
int internal_fstat(fd_t fd, void *buf);
int internal_dup2(int oldfd, int newfd);
uptr internal_stat(const char *path, void *buf);
uptr internal_lstat(const char *path, void *buf);
uptr internal_fstat(fd_t fd, void *buf);
uptr internal_dup2(int oldfd, int newfd);
uptr internal_readlink(const char *path, char *buf, uptr bufsize);
uptr internal_unlink(const char *path);
void NORETURN internal__exit(int exitcode);
uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
uptr internal_ptrace(int request, int pid, void *addr, void *data);
uptr internal_waitpid(int pid, int *status, int options);
uptr internal_getpid();
uptr internal_getppid();
// Threading
int internal_sched_yield();
uptr internal_sched_yield();
// Error handling
bool internal_iserror(uptr retval, int *rverrno = 0);
} // namespace __sanitizer
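A plausible Linux-flavored shape for internal_iserror(), assuming the kernel convention of returning -errno in [-4095, -1]; this is a sketch, not the tree's exact code, and the libc-based generic variant would consult errno instead:

bool internal_iserror(uptr retval, int *rverrno) {
  if (retval >= (uptr)-4095) {  // retval encodes -errno
    if (rverrno)
      *rverrno = -(sptr)retval;
    return true;
  }
  return false;
}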


@ -9,29 +9,48 @@
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#ifdef __linux__
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include <asm/param.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#if !SANITIZER_ANDROID
#include <link.h>
#endif
#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <unistd.h>
#include <unwind.h>
#include <errno.h>
#if !SANITIZER_ANDROID
#include <sys/signal.h>
#endif
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
long tv_usec;
};
// <linux/futex.h> is broken on some Linux distributions.
const int FUTEX_WAIT = 0;
@ -48,165 +67,158 @@ const int FUTEX_WAKE = 1;
namespace __sanitizer {
#ifdef __x86_64__
#include "sanitizer_syscall_linux_x86_64.inc"
#else
#include "sanitizer_syscall_generic.inc"
#endif
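For orientation, a simplified sketch of what the x86-64 include provides (one arity shown; the real sanitizer_syscall_linux_x86_64.inc covers zero through six arguments, while the generic include essentially forwards to libc):

static uptr internal_syscall(u64 nr, u64 a1, u64 a2, u64 a3) {
  u64 retval;
  asm volatile("syscall"
               : "=a"(retval)
               : "a"(nr), "D"(a1), "S"(a2), "d"(a3)
               : "rcx", "r11", "memory", "cc");
  return retval;
}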
// --------------- sanitizer_libc.h
void *internal_mmap(void *addr, uptr length, int prot, int flags,
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
return internal_syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
#else
return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset);
#endif
}
int internal_munmap(void *addr, uptr length) {
return syscall(__NR_munmap, addr, length);
uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(__NR_munmap, addr, length);
}
int internal_close(fd_t fd) {
return syscall(__NR_close, fd);
uptr internal_close(fd_t fd) {
return internal_syscall(__NR_close, fd);
}
fd_t internal_open(const char *filename, int flags) {
return syscall(__NR_open, filename, flags);
uptr internal_open(const char *filename, int flags) {
return internal_syscall(__NR_open, filename, flags);
}
fd_t internal_open(const char *filename, int flags, u32 mode) {
return syscall(__NR_open, filename, flags, mode);
uptr internal_open(const char *filename, int flags, u32 mode) {
return internal_syscall(__NR_open, filename, flags, mode);
}
fd_t OpenFile(const char *filename, bool write) {
uptr OpenFile(const char *filename, bool write) {
return internal_open(filename,
write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count));
HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, buf, count));
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count));
HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, buf, count));
return res;
}
int internal_stat(const char *path, void *buf) {
#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
out->st_ino = in->st_ino;
out->st_mode = in->st_mode;
out->st_nlink = in->st_nlink;
out->st_uid = in->st_uid;
out->st_gid = in->st_gid;
out->st_rdev = in->st_rdev;
out->st_size = in->st_size;
out->st_blksize = in->st_blksize;
out->st_blocks = in->st_blocks;
out->st_atime = in->st_atime;
out->st_mtime = in->st_mtime;
out->st_ctime = in->st_ctime;
out->st_ino = in->st_ino;
}
#endif
uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return syscall(__NR_stat, path, buf);
return internal_syscall(__NR_stat, path, buf);
#else
return syscall(__NR_stat64, path, buf);
struct stat64 buf64;
int res = internal_syscall(__NR_stat64, path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
int internal_lstat(const char *path, void *buf) {
uptr internal_lstat(const char *path, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return syscall(__NR_lstat, path, buf);
return internal_syscall(__NR_lstat, path, buf);
#else
return syscall(__NR_lstat64, path, buf);
struct stat64 buf64;
int res = internal_syscall(__NR_lstat64, path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
int internal_fstat(fd_t fd, void *buf) {
uptr internal_fstat(fd_t fd, void *buf) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
return syscall(__NR_fstat, fd, buf);
return internal_syscall(__NR_fstat, fd, buf);
#else
return syscall(__NR_fstat64, fd, buf);
struct stat64 buf64;
int res = internal_syscall(__NR_fstat64, fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
#endif
}
uptr internal_filesize(fd_t fd) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
struct stat st;
#else
struct stat64 st;
#endif
if (internal_fstat(fd, &st))
return -1;
return (uptr)st.st_size;
}
int internal_dup2(int oldfd, int newfd) {
return syscall(__NR_dup2, oldfd, newfd);
uptr internal_dup2(int oldfd, int newfd) {
return internal_syscall(__NR_dup2, oldfd, newfd);
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
return (uptr)syscall(__NR_readlink, path, buf, bufsize);
return internal_syscall(__NR_readlink, path, buf, bufsize);
}
int internal_sched_yield() {
return syscall(__NR_sched_yield);
uptr internal_unlink(const char *path) {
return internal_syscall(__NR_unlink, path);
}
uptr internal_sched_yield() {
return internal_syscall(__NR_sched_yield);
}
void internal__exit(int exitcode) {
syscall(__NR_exit_group, exitcode);
internal_syscall(__NR_exit_group, exitcode);
Die(); // Unreachable.
}
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]) {
return internal_syscall(__NR_execve, filename, argv, envp);
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
#if SANITIZER_LINUX_USES_64BIT_SYSCALLS
struct stat st;
if (syscall(__NR_stat, filename, &st))
if (internal_stat(filename, &st))
return false;
#else
struct stat64 st;
if (syscall(__NR_stat64, filename, &st))
return false;
#endif
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
uptr GetTid() {
return syscall(__NR_gettid);
return internal_syscall(__NR_gettid);
}
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M
CHECK(stack_top);
CHECK(stack_bottom);
if (at_initialization) {
// This is the main thread. Libpthread may not be initialized yet.
struct rlimit rl;
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
MemoryMappingLayout proc_maps;
uptr start, end, offset;
uptr prev_end = 0;
while (proc_maps.Next(&start, &end, &offset, 0, 0)) {
if ((uptr)&rl < end)
break;
prev_end = end;
}
CHECK((uptr)&rl >= start && (uptr)&rl < end);
// Get stacksize from rlimit, but clip it so that it does not overlap
// with other mappings.
uptr stacksize = rl.rlim_cur;
if (stacksize > end - prev_end)
stacksize = end - prev_end;
// When running with unlimited stack size, we still want to set some limit.
// The unlimited stack size is caused by 'ulimit -s unlimited'.
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
if (stacksize > kMaxThreadStackSize)
stacksize = kMaxThreadStackSize;
*stack_top = end;
*stack_bottom = end - stacksize;
return;
}
pthread_attr_t attr;
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
void *stackaddr = 0;
pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr);
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
CHECK(stacksize < kMaxThreadStackSize); // Sanity check.
u64 NanoTime() {
kernel_timeval tv = {};
internal_syscall(__NR_gettimeofday, &tv, 0);
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
// Like getenv, but reads env directly from /proc and does not use libc.
@ -237,21 +249,11 @@ const char *GetEnv(const char *name) {
return 0; // Not found.
}
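The diff viewer elides the body of GetEnv(); as a hedged sketch, a libc-free implementation reads /proc/self/environ into a buffer once and scans its NUL-separated name=value records, roughly:

// Illustrative only; FindEnvEntry is a hypothetical helper.
static const char *FindEnvEntry(const char *blob, uptr len, const char *name) {
  uptr name_len = internal_strlen(name);
  for (const char *p = blob; p < blob + len; p += internal_strlen(p) + 1)
    if (!internal_strncmp(p, name, name_len) && p[name_len] == '=')
      return p + name_len + 1;
  return 0;  // Not found.
}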
#ifdef __GLIBC__
extern "C" {
extern void *__libc_stack_end;
SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
}
static void GetArgsAndEnv(char ***argv, char ***envp) {
uptr *stack_end = (uptr *)__libc_stack_end;
int argc = *stack_end;
*argv = (char**)(stack_end + 1);
*envp = (char**)(stack_end + argc + 2);
}
#else // __GLIBC__
#if !SANITIZER_GO
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
@ -270,20 +272,32 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
(*arr)[count] = 0;
}
#endif
static void GetArgsAndEnv(char ***argv, char ***envp) {
static const int kMaxArgv = 2000, kMaxEnvp = 2000;
ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
static void GetArgsAndEnv(char*** argv, char*** envp) {
#if !SANITIZER_GO
if (&__libc_stack_end) {
#endif
uptr* stack_end = (uptr*)__libc_stack_end;
int argc = *stack_end;
*argv = (char**)(stack_end + 1);
*envp = (char**)(stack_end + argc + 2);
#if !SANITIZER_GO
} else {
static const int kMaxArgv = 2000, kMaxEnvp = 2000;
ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
}
#endif
}
#endif // __GLIBC__
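The __libc_stack_end arithmetic in the new GetArgsAndEnv() follows the System V process-startup layout:

//   stack_end[0]            argc
//   stack_end[1 .. argc]    argv[0] .. argv[argc-1]
//   stack_end[argc + 1]     NULL (argv terminator)
//   stack_end[argc + 2 ..]  envp entries, NULL-terminated
// hence *argv = stack_end + 1 and *envp = stack_end + argc + 2.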
void ReExec() {
char **argv, **envp;
GetArgsAndEnv(&argv, &envp);
execve("/proc/self/exe", argv, envp);
Printf("execve failed, errno %d\n", errno);
uptr rv = internal_execve("/proc/self/exe", argv, envp);
int rverrno;
CHECK_EQ(internal_iserror(rv, &rverrno), true);
Printf("execve failed, errno %d\n", rverrno);
Die();
}
@ -293,6 +307,10 @@ void PrepareForSandboxing() {
// process will be able to load additional libraries, so it's fine to use the
// cached mappings.
MemoryMappingLayout::CacheMemoryMappings();
// Same for /proc/self/exe in the symbolizer.
#if !SANITIZER_GO
getSymbolizer()->PrepareForSandboxing();
#endif
}
// ----------------- sanitizer_procmaps.h
@ -300,18 +318,22 @@ void PrepareForSandboxing() {
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
MemoryMappingLayout::MemoryMappingLayout() {
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
proc_self_maps_.len =
ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
&proc_self_maps_.mmaped_size, 1 << 26);
if (proc_self_maps_.mmaped_size == 0) {
LoadFromCache();
CHECK_GT(proc_self_maps_.len, 0);
if (cache_enabled) {
if (proc_self_maps_.mmaped_size == 0) {
LoadFromCache();
CHECK_GT(proc_self_maps_.len, 0);
}
} else {
CHECK_GT(proc_self_maps_.mmaped_size, 0);
}
// internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
Reset();
// FIXME: in the future we may want to cache the mappings on demand only.
CacheMemoryMappings();
if (cache_enabled)
CacheMemoryMappings();
}
MemoryMappingLayout::~MemoryMappingLayout() {
@ -373,7 +395,7 @@ static uptr ParseHex(char **str) {
return x;
}
static bool IsOnOf(char c, char c1, char c2) {
static bool IsOneOf(char c, char c1, char c2) {
return c == c1 || c == c2;
}
@ -381,8 +403,33 @@ static bool IsDecimal(char c) {
return c >= '0' && c <= '9';
}
static bool IsHex(char c) {
return (c >= '0' && c <= '9')
|| (c >= 'a' && c <= 'f');
}
static uptr ReadHex(const char *p) {
uptr v = 0;
for (; IsHex(p[0]); p++) {
if (p[0] >= '0' && p[0] <= '9')
v = v * 16 + p[0] - '0';
else
v = v * 16 + p[0] - 'a' + 10;
}
return v;
}
static uptr ReadDecimal(const char *p) {
uptr v = 0;
for (; IsDecimal(p[0]); p++)
v = v * 10 + p[0] - '0';
return v;
}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size) {
char filename[], uptr filename_size,
uptr *protection) {
char *last = proc_self_maps_.data + proc_self_maps_.len;
if (current_ >= last) return false;
uptr dummy;
@ -397,10 +444,22 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
CHECK_EQ(*current_++, '-');
*end = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
CHECK(IsOnOf(*current_++, '-', 'r'));
CHECK(IsOnOf(*current_++, '-', 'w'));
CHECK(IsOnOf(*current_++, '-', 'x'));
CHECK(IsOnOf(*current_++, 's', 'p'));
uptr local_protection = 0;
CHECK(IsOneOf(*current_, '-', 'r'));
if (*current_++ == 'r')
local_protection |= kProtectionRead;
CHECK(IsOneOf(*current_, '-', 'w'));
if (*current_++ == 'w')
local_protection |= kProtectionWrite;
CHECK(IsOneOf(*current_, '-', 'x'));
if (*current_++ == 'x')
local_protection |= kProtectionExecute;
CHECK(IsOneOf(*current_, 's', 'p'));
if (*current_++ == 's')
local_protection |= kProtectionShared;
if (protection) {
*protection = local_protection;
}
CHECK_EQ(*current_++, ' ');
*offset = ParseHex(&current_);
CHECK_EQ(*current_++, ' ');
@ -432,87 +491,35 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
// Gets the object name and the offset by walking MemoryMappingLayout.
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[],
uptr filename_size) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
uptr filename_size,
uptr *protection) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
protection);
}
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
#else
return false;
#endif
}
bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
char buff[17];
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
return false;
internal_strncpy(name, buff, max_len);
name[max_len] = 0;
return true;
#else
return false;
#endif
}
#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
uptr val;
_Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
15 /* r15 = PC */, _UVRSD_UINT32, &val);
CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
// Clear the Thumb bit.
return val & ~(uptr)1;
#else
return _Unwind_GetIP(ctx);
#endif
}
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
StackTrace *b = (StackTrace*)param;
CHECK(b->size < b->max_size);
uptr pc = Unwind_GetIP(ctx);
b->trace[b->size++] = pc;
if (b->size == b->max_size) return UNWIND_STOP;
return UNWIND_CONTINUE;
}
static bool MatchPc(uptr cur_pc, uptr trace_pc) {
return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
this->size = 0;
this->max_size = max_depth;
if (max_depth > 1) {
_Unwind_Backtrace(Unwind_Trace, this);
// We need to pop a few frames so that pc is on top.
// trace[0] belongs to the current function so we always pop it.
int to_pop = 1;
/**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
this->PopStackFrames(to_pop);
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
char *smaps = 0;
uptr smaps_cap = 0;
uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
&smaps, &smaps_cap, 64<<20);
uptr start = 0;
bool file = false;
const char *pos = smaps;
while (pos < smaps + smaps_len) {
if (IsHex(pos[0])) {
start = ReadHex(pos);
for (; *pos != '/' && *pos > '\n'; pos++) {}
file = *pos == '/';
} else if (internal_strncmp(pos, "Rss:", 4) == 0) {
for (; *pos < '0' || *pos > '9'; pos++) {}
uptr rss = ReadDecimal(pos) * 1024;
cb(start, rss, file, stats, stats_size);
}
while (*pos++ != '\n') {}
}
this->trace[0] = pc;
UnmapOrDie(smaps, smaps_cap);
}
#endif // #ifndef SANITIZER_GO
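To see what GetMemoryProfile() walks, here is an illustrative /proc/self/smaps fragment (hypothetical values):

// 7fb0c2800000-7fb0c2a00000 r-xp 00000000 08:01 131  /lib/libc-2.17.so
// Rss:                 512 kB

The leading hex field becomes start, a '/' encountered before the end of the line marks the mapping as file-backed, and each Rss value (in kB) is scaled by 1024 before being passed to the callback.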
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
@ -523,12 +530,16 @@ BlockingMutex::BlockingMutex(LinkerInitialized) {
CHECK_EQ(owner_, 0);
}
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
internal_syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
}
void BlockingMutex::Unlock() {
@ -536,9 +547,281 @@ void BlockingMutex::Unlock() {
u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping)
syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
internal_syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
}
void BlockingMutex::CheckLocked() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
// ----------------- sanitizer_linux.h
// The actual size of this structure is specified by d_reclen.
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
struct linux_dirent {
unsigned long d_ino;
unsigned long d_off;
unsigned short d_reclen;
char d_name[256];
};
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
return internal_syscall(__NR_ptrace, request, pid, addr, data);
}
uptr internal_waitpid(int pid, int *status, int options) {
return internal_syscall(__NR_wait4, pid, status, options, 0 /* rusage */);
}
uptr internal_getpid() {
return internal_syscall(__NR_getpid);
}
uptr internal_getppid() {
return internal_syscall(__NR_getppid);
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
return internal_syscall(__NR_getdents, fd, dirp, count);
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
return internal_syscall(__NR_lseek, fd, offset, whence);
}
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5);
}
uptr internal_sigaltstack(const struct sigaltstack *ss,
struct sigaltstack *oss) {
return internal_syscall(__NR_sigaltstack, ss, oss);
}
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
: pid_(pid),
descriptor_(-1),
buffer_(4096),
error_(true),
entry_((struct linux_dirent *)buffer_.data()),
bytes_read_(0) {
char task_directory_path[80];
internal_snprintf(task_directory_path, sizeof(task_directory_path),
"/proc/%d/task/", pid);
uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);
if (internal_iserror(openrv)) {
error_ = true;
Report("Can't open /proc/%d/task for reading.\n", pid);
} else {
error_ = false;
descriptor_ = openrv;
}
}
int ThreadLister::GetNextTID() {
int tid = -1;
do {
if (error_)
return -1;
if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries())
return -1;
if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' &&
entry_->d_name[0] <= '9') {
// Found a valid tid.
tid = (int)internal_atoll(entry_->d_name);
}
entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen);
} while (tid < 0);
return tid;
}
void ThreadLister::Reset() {
if (error_ || descriptor_ < 0)
return;
internal_lseek(descriptor_, 0, SEEK_SET);
}
ThreadLister::~ThreadLister() {
if (descriptor_ >= 0)
internal_close(descriptor_);
}
bool ThreadLister::error() { return error_; }
bool ThreadLister::GetDirectoryEntries() {
CHECK_GE(descriptor_, 0);
CHECK_NE(error_, true);
bytes_read_ = internal_getdents(descriptor_,
(struct linux_dirent *)buffer_.data(),
buffer_.size());
if (internal_iserror(bytes_read_)) {
Report("Can't read directory entries from /proc/%d/task.\n", pid_);
error_ = true;
return false;
} else if (bytes_read_ == 0) {
return false;
}
entry_ = (struct linux_dirent *)buffer_.data();
return true;
}
uptr GetPageSize() {
#if defined(__x86_64__) || defined(__i386__)
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
#endif
}
static char proc_self_exe_cache_str[kMaxPathLength];
static uptr proc_self_exe_cache_len = 0;
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
uptr module_name_len = internal_readlink(
"/proc/self/exe", buf, buf_len);
int readlink_error;
if (internal_iserror(module_name_len, &readlink_error)) {
if (proc_self_exe_cache_len) {
// If available, use the cached module name.
CHECK_LE(proc_self_exe_cache_len, buf_len);
internal_strncpy(buf, proc_self_exe_cache_str, buf_len);
module_name_len = internal_strlen(proc_self_exe_cache_str);
} else {
// We can't read /proc/self/exe for some reason; assume the name of the
// binary is unknown.
Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
"some stack frames may not be symbolized\n", readlink_error);
module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
}
CHECK_LT(module_name_len, buf_len);
buf[module_name_len] = '\0';
}
return module_name_len;
}
void CacheBinaryName() {
if (!proc_self_exe_cache_len) {
proc_self_exe_cache_len =
ReadBinaryName(proc_self_exe_cache_str, kMaxPathLength);
}
}
// Match full names of the form /path/to/base_name{-,.}*
bool LibraryNameIs(const char *full_name, const char *base_name) {
const char *name = full_name;
// Strip path.
while (*name != '\0') name++;
while (name > full_name && *name != '/') name--;
if (*name == '/') name++;
uptr base_name_length = internal_strlen(base_name);
if (internal_strncmp(name, base_name, base_name_length)) return false;
return (name[base_name_length] == '-' || name[base_name_length] == '.');
}
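Illustrative inputs for the rule above (hypothetical paths):

// LibraryNameIs("/usr/lib/libpthread-2.17.so", "libpthread") -> true
// LibraryNameIs("/usr/lib/libpthread.so.0",    "libpthread") -> true
// LibraryNameIs("/usr/lib/libpthread_db.so",   "libpthread") -> false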
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize;
// Find the segment with the minimum base so we can "relocate" the p_vaddr
// fields. Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC
// objects have a non-zero base.
uptr preferred_base = (uptr)-1;
for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
Elf_Phdr *phdr = (Elf_Phdr *)iter;
if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr)
preferred_base = (uptr)phdr->p_vaddr;
}
// Compute the delta from the real base to get a relocation delta.
sptr delta = (uptr)base - preferred_base;
// Now we can figure out what the loader really mapped.
for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
Elf_Phdr *phdr = (Elf_Phdr *)iter;
if (phdr->p_type == PT_LOAD) {
uptr seg_start = phdr->p_vaddr + delta;
uptr seg_end = seg_start + phdr->p_memsz;
// None of these values are aligned. We consider the ragged edges of the
// load segment as defined, since they are mapped from the file.
seg_start = RoundDownTo(seg_start, GetPageSizeCached());
seg_end = RoundUpTo(seg_end, GetPageSizeCached());
cb((void *)seg_start, seg_end - seg_start);
}
}
}
#endif
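A worked example of the delta computation, with hypothetical numbers: an ET_DYN object whose PT_LOAD segments have p_vaddr 0x0 and 0x200000, loaded with l_addr == 0x7f0000000000, yields preferred_base = 0x0 and delta = 0x7f0000000000, so the second segment is reported (page-rounded) starting at 0x7f0000200000.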
#if defined(__x86_64__)
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
// the parent (because we don't know how to allocate a new thread
// descriptor to keep glibc happy). So the stock version of clone(), when
// used with CLONE_VM, would end up corrupting the parent's thread descriptor.
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
child_stack = (char *)child_stack - 2 * sizeof(void *);
((void **)child_stack)[0] = (void *)(uptr)fn;
((void **)child_stack)[1] = arg;
__asm__ __volatile__(
/* %rax = syscall(%rax = __NR_clone,
* %rdi = flags,
* %rsi = child_stack,
* %rdx = parent_tidptr,
* %r8 = new_tls,
* %r10 = child_tidptr)
*/
"movq %6,%%r8\n"
"movq %7,%%r10\n"
".cfi_endproc\n"
"syscall\n"
/* if (%rax != 0)
* return;
*/
"testq %%rax,%%rax\n"
"jnz 1f\n"
/* In the child. Terminate unwind chain. */
".cfi_startproc\n"
".cfi_undefined %%rip;\n"
"xorq %%rbp,%%rbp\n"
/* Call "fn(arg)". */
"popq %%rax\n"
"popq %%rdi\n"
"call *%%rax\n"
/* Call _exit(%rax). */
"movq %%rax,%%rdi\n"
"movq %2,%%rax\n"
"syscall\n"
/* Return to parent. */
"1:\n"
: "=a" (res)
: "a"(__NR_clone), "i"(__NR_exit),
"S"(child_stack),
"D"(flags),
"d"(parent_tidptr),
"r"(newtls),
"r"(child_tidptr)
: "rsp", "memory", "r8", "r10", "r11", "rcx");
return res;
}
#endif // defined(__x86_64__)
} // namespace __sanitizer
#endif // __linux__
#endif // SANITIZER_LINUX


@ -0,0 +1,81 @@
//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Linux-specific syscall wrappers and classes.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LINUX_H
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
struct link_map; // Opaque type returned by dlopen().
struct sigaltstack;
namespace __sanitizer {
// Dirent structure for getdents(). Note that this structure is different from
// the one in <dirent.h>, which is used by readdir().
struct linux_dirent;
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
#ifdef __x86_64__
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
public:
explicit ThreadLister(int pid);
~ThreadLister();
// GetNextTID returns -1 if the list of threads is exhausted, or if there has
// been an error.
int GetNextTID();
void Reset();
bool error();
private:
bool GetDirectoryEntries();
int pid_;
int descriptor_;
InternalScopedBuffer<char> buffer_;
bool error_;
struct linux_dirent* entry_;
int bytes_read_;
};
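A usage sketch (the suspend logic is hypothetical; GetNextTID() returns -1 on exhaustion or error):

ThreadLister thread_lister(pid);
for (int tid = thread_lister.GetNextTID(); tid >= 0;
     tid = thread_lister.GetNextTID()) {
  SuspendThread(tid);  // hypothetical per-thread action
}
if (thread_lister.error())
  Report("Failed to list threads of process %d.\n", pid);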
void AdjustStackSizeLinux(void *attr, int verbosity);
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
uptr ThreadSelfOffset();
// Matches a library's file name against a base name (stripping path and version
// information).
bool LibraryNameIs(const char *full_name, const char *base_name);
// Read the name of the current binary from /proc/self/exe.
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
// Cache the value of /proc/self/exe.
void CacheBinaryName();
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
} // namespace __sanitizer
#endif // SANITIZER_LINUX
#endif // SANITIZER_LINUX_H


@ -0,0 +1,351 @@
//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
#include "sanitizer_common.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include <dlfcn.h>
#include <pthread.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <unwind.h>
#if !SANITIZER_ANDROID
#include <elf.h>
#include <link.h>
#endif
namespace __sanitizer {
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
static const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
CHECK(stack_top);
CHECK(stack_bottom);
if (at_initialization) {
// This is the main thread. Libpthread may not be initialized yet.
struct rlimit rl;
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset;
uptr prev_end = 0;
while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
if ((uptr)&rl < end)
break;
prev_end = end;
}
CHECK((uptr)&rl >= start && (uptr)&rl < end);
// Get stacksize from rlimit, but clip it so that it does not overlap
// with other mappings.
uptr stacksize = rl.rlim_cur;
if (stacksize > end - prev_end)
stacksize = end - prev_end;
// When running with unlimited stack size, we still want to set some limit.
// The unlimited stack size is caused by 'ulimit -s unlimited'.
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
if (stacksize > kMaxThreadStackSize)
stacksize = kMaxThreadStackSize;
*stack_top = end;
*stack_bottom = end - stacksize;
return;
}
pthread_attr_t attr;
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
uptr stacksize = 0;
void *stackaddr = 0;
pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
}
// Does not compile for Go because dlsym() requires -ldl
#ifndef SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (f == 0)
return false;
typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
}
#endif
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
#else
return false;
#endif
}
bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
char buff[17];
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
return false;
internal_strncpy(name, buff, max_len);
name[max_len] = 0;
return true;
#else
return false;
#endif
}
#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
uptr val;
_Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
15 /* r15 = PC */, _UVRSD_UINT32, &val);
CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
// Clear the Thumb bit.
return val & ~(uptr)1;
#else
return _Unwind_GetIP(ctx);
#endif
}
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
StackTrace *b = (StackTrace*)param;
CHECK(b->size < b->max_size);
uptr pc = Unwind_GetIP(ctx);
b->trace[b->size++] = pc;
if (b->size == b->max_size) return UNWIND_STOP;
return UNWIND_CONTINUE;
}
static bool MatchPc(uptr cur_pc, uptr trace_pc) {
return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
this->size = 0;
this->max_size = max_depth;
if (max_depth > 1) {
_Unwind_Backtrace(Unwind_Trace, this);
// We need to pop a few frames so that pc is on top.
// trace[0] belongs to the current function so we always pop it.
int to_pop = 1;
/**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
this->PopStackFrames(to_pop);
}
this->trace[0] = pc;
}
#endif // !SANITIZER_GO
static uptr g_tls_size;
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
# define DL_INTERNAL_FUNCTION
#endif
void InitTlsSize() {
#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
internal_memcpy(&get_tls, &get_tls_static_info_ptr,
sizeof(get_tls_static_info_ptr));
CHECK_NE(get_tls, 0);
size_t tls_size = 0;
size_t tls_align = 0;
get_tls(&tls_size, &tls_align);
g_tls_size = tls_size;
#endif
}
uptr GetTlsSize() {
return g_tls_size;
}
#if defined(__x86_64__) || defined(__i386__)
// sizeof(struct thread) from glibc.
// There has been a report of this being different on glibc 2.11 and 2.13. We
// don't know when this change happened, so 2.14 is a conservative estimate.
#if __GLIBC_PREREQ(2, 14)
const uptr kThreadDescriptorSize = FIRST_32_SECOND_64(1216, 2304);
#else
const uptr kThreadDescriptorSize = FIRST_32_SECOND_64(1168, 2304);
#endif
uptr ThreadDescriptorSize() {
return kThreadDescriptorSize;
}
// The offset at which pointer to self is located in the thread descriptor.
const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
uptr ThreadSelfOffset() {
return kThreadSelfOffset;
}
uptr ThreadSelf() {
uptr descr_addr;
#ifdef __i386__
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#else
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#endif
return descr_addr;
}
#endif // defined(__x86_64__) || defined(__i386__)
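These constants encode the x86 TLS variant II layout assumed below: the static TLS block sits immediately below the thread descriptor, so GetThreadStackAndTls() recovers its start as

//   tls_addr = ThreadSelf() - GetTlsSize() + ThreadDescriptorSize()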
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
#ifndef SANITIZER_GO
#if defined(__x86_64__) || defined(__i386__)
*tls_addr = ThreadSelf();
*tls_size = GetTlsSize();
*tls_addr -= *tls_size;
*tls_addr += kThreadDescriptorSize;
#else
*tls_addr = 0;
*tls_size = 0;
#endif
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
*stk_size = stack_top - stack_bottom;
if (!main) {
// If stack and tls intersect, make them non-intersecting.
if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
CHECK_GT(*tls_addr + *tls_size, *stk_addr);
CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
*stk_size -= *tls_size;
*tls_addr = *stk_addr + *stk_size;
}
}
#else // SANITIZER_GO
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
*tls_size = 0;
#endif // SANITIZER_GO
}
void AdjustStackSizeLinux(void *attr_, int verbosity) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
size_t stacksize = 0;
pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
// We place a lot of tool data into TLS, account for that.
const uptr minstacksize = GetTlsSize() + 128*1024;
if (stacksize < minstacksize) {
if (!stack_set) {
if (verbosity && stacksize != 0)
Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
minstacksize);
pthread_attr_setstacksize(attr, minstacksize);
} else {
Printf("Sanitizer: pre-allocated stack size is insufficient: "
"%zu < %zu\n", stacksize, minstacksize);
Printf("Sanitizer: pthread_create is likely to fail.\n");
}
}
}
#if SANITIZER_ANDROID
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
return 0;
}
#else // SANITIZER_ANDROID
typedef ElfW(Phdr) Elf_Phdr;
struct DlIteratePhdrData {
LoadedModule *modules;
uptr current_n;
bool first;
uptr max_n;
string_predicate_t filter;
};
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
if (data->current_n == data->max_n)
return 0;
InternalScopedBuffer<char> module_name(kMaxPathLength);
module_name.data()[0] = '\0';
if (data->first) {
data->first = false;
// First module is the binary itself.
ReadBinaryName(module_name.data(), module_name.size());
} else if (info->dlpi_name) {
internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
}
if (module_name.data()[0] == '\0')
return 0;
if (data->filter && !data->filter(module_name.data()))
return 0;
void *mem = &data->modules[data->current_n];
LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
info->dlpi_addr);
data->current_n++;
for (int i = 0; i < info->dlpi_phnum; i++) {
const Elf_Phdr *phdr = &info->dlpi_phdr[i];
if (phdr->p_type == PT_LOAD) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
cur_module->addAddressRange(cur_beg, cur_end);
}
}
return 0;
}
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
CHECK(modules);
DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
dl_iterate_phdr(dl_iterate_phdr_cb, &data);
return data.current_n;
}
#endif // SANITIZER_ANDROID
} // namespace __sanitizer
#endif // SANITIZER_LINUX


@ -10,7 +10,9 @@
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#ifdef __APPLE__
#include "sanitizer_platform.h"
#if SANITIZER_MAC
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
#ifndef _DARWIN_USE_64_BIT_INODE
@ -21,6 +23,7 @@
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include <crt_externs.h> // for _NSGetEnviron
@ -35,32 +38,35 @@
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>
#include <errno.h>
namespace __sanitizer {
#include "sanitizer_syscall_generic.inc"
// ---------------------- sanitizer_libc.h
void *internal_mmap(void *addr, size_t length, int prot, int flags,
int fd, u64 offset) {
return mmap(addr, length, prot, flags, fd, offset);
uptr internal_mmap(void *addr, size_t length, int prot, int flags,
int fd, u64 offset) {
return (uptr)mmap(addr, length, prot, flags, fd, offset);
}
int internal_munmap(void *addr, uptr length) {
uptr internal_munmap(void *addr, uptr length) {
return munmap(addr, length);
}
int internal_close(fd_t fd) {
uptr internal_close(fd_t fd) {
return close(fd);
}
fd_t internal_open(const char *filename, int flags) {
uptr internal_open(const char *filename, int flags) {
return open(filename, flags);
}
fd_t internal_open(const char *filename, int flags, u32 mode) {
uptr internal_open(const char *filename, int flags, u32 mode) {
return open(filename, flags, mode);
}
fd_t OpenFile(const char *filename, bool write) {
uptr OpenFile(const char *filename, bool write) {
return internal_open(filename,
write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
}
@ -73,15 +79,15 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
return write(fd, buf, count);
}
int internal_stat(const char *path, void *buf) {
uptr internal_stat(const char *path, void *buf) {
return stat(path, (struct stat *)buf);
}
int internal_lstat(const char *path, void *buf) {
uptr internal_lstat(const char *path, void *buf) {
return lstat(path, (struct stat *)buf);
}
int internal_fstat(fd_t fd, void *buf) {
uptr internal_fstat(fd_t fd, void *buf) {
return fstat(fd, (struct stat *)buf);
}
@ -92,7 +98,7 @@ uptr internal_filesize(fd_t fd) {
return (uptr)st.st_size;
}
int internal_dup2(int oldfd, int newfd) {
uptr internal_dup2(int oldfd, int newfd) {
return dup2(oldfd, newfd);
}
@ -100,7 +106,7 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
return readlink(path, buf, bufsize);
}
int internal_sched_yield() {
uptr internal_sched_yield() {
return sched_yield();
}
@ -108,6 +114,10 @@ void internal__exit(int exitcode) {
_exit(exitcode);
}
uptr internal_getpid() {
return getpid();
}
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@ -159,9 +169,13 @@ void PrepareForSandboxing() {
// Nothing here for now.
}
uptr GetPageSize() {
return sysconf(_SC_PAGESIZE);
}
// ----------------- sanitizer_procmaps.h
MemoryMappingLayout::MemoryMappingLayout() {
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
}
@ -214,7 +228,9 @@ void MemoryMappingLayout::LoadFromCache() {
template<u32 kLCSegment, typename SegmentCommand>
bool MemoryMappingLayout::NextSegmentLoad(
uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size) {
char filename[], uptr filename_size, uptr *protection) {
if (protection)
UNIMPLEMENTED();
const char* lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
@ -239,7 +255,8 @@ bool MemoryMappingLayout::NextSegmentLoad(
}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size) {
char filename[], uptr filename_size,
uptr *protection) {
for (; current_image_ >= 0; current_image_--) {
const mach_header* hdr = _dyld_get_image_header(current_image_);
if (!hdr) continue;
@ -271,14 +288,14 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
start, end, offset, filename, filename_size))
start, end, offset, filename, filename_size, protection))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
start, end, offset, filename, filename_size))
start, end, offset, filename, filename_size, protection))
return true;
break;
}
@ -292,18 +309,24 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
char filename[],
uptr filename_size) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
uptr filename_size,
uptr *protection) {
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
protection);
}
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK(OS_SPINLOCK_INIT == 0);
CHECK(owner_ != (uptr)pthread_self());
CHECK_EQ(OS_SPINLOCK_INIT, 0);
CHECK_NE(owner_, (uptr)pthread_self());
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
CHECK(!owner_);
owner_ = (uptr)pthread_self();
@ -315,6 +338,69 @@ void BlockingMutex::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
void BlockingMutex::CheckLocked() {
CHECK_EQ((uptr)pthread_self(), owner_);
}
u64 NanoTime() {
return 0;
}
uptr GetTlsSize() {
return 0;
}
void InitTlsSize() {
}
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
#ifndef SANITIZER_GO
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
*stk_size = stack_top - stack_bottom;
*tls_addr = 0;
*tls_size = 0;
#else
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
*tls_size = 0;
#endif
}
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter) {
MemoryMappingLayout memory_mapping(false);
memory_mapping.Reset();
uptr cur_beg, cur_end, cur_offset;
InternalScopedBuffer<char> module_name(kMaxPathLength);
uptr n_modules = 0;
for (uptr i = 0;
n_modules < max_modules &&
memory_mapping.Next(&cur_beg, &cur_end, &cur_offset,
module_name.data(), module_name.size(), 0);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
continue;
if (filter && !filter(cur_name))
continue;
LoadedModule *cur_module = 0;
if (n_modules > 0 &&
0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
cur_module = &modules[n_modules - 1];
} else {
void *mem = &modules[n_modules];
cur_module = new(mem) LoadedModule(cur_name, cur_beg);
n_modules++;
}
cur_module->addAddressRange(cur_beg, cur_end);
}
return n_modules;
}
} // namespace __sanitizer
#endif // __APPLE__
#endif // SANITIZER_MAC


@ -68,8 +68,10 @@ class SpinMutex : public StaticSpinMutex {
class BlockingMutex {
public:
explicit BlockingMutex(LinkerInitialized);
BlockingMutex();
void Lock();
void Unlock();
void CheckLocked();
private:
uptr opaque_storage_[10];
uptr owner_; // for debugging


@ -17,7 +17,7 @@
#include "sanitizer_internal_defs.h"
namespace __sanitizer {
#if (SANITIZER_WORDSIZE == 64) || defined(__APPLE__)
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_ptr_type;
#else
typedef u32 operator_new_ptr_type;

Some files were not shown because too many files have changed in this diff.