mf-heuristics.c: Fix whitespace at end of line.

	* mf-heuristics.c: Fix whitespace at end of line.
	* mf-hooks1.c: Likewise.
	* mf-hooks2.c: Likewise.
	* mf-hooks3.c: Likewise.
	* mf-impl.h: Likewise.
	* mf-runtime.c: Likewise.
	* mf-runtime.h: Likewise.

From-SVN: r96850
This commit is contained in:
parent b025006799
commit fb925a5148
ChangeLog
@@ -1,3 +1,13 @@
+2005-03-21  Mike Stump  <mrs@apple.com>
+
+	* mf-heuristics.c: Fix whitespace at end of line.
+	* mf-hooks1.c: Likewise.
+	* mf-hooks2.c: Likewise.
+	* mf-hooks3.c: Likewise.
+	* mf-impl.h: Likewise.
+	* mf-runtime.c: Likewise.
+	* mf-runtime.h: Likewise.
+
 2005-03-21  Zack Weinberg  <zack@codesourcery.com>
 
 	* configure.ac: Do not invoke TL_AC_GCC_VERSION.
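The fix is mechanical: every affected hunk deletes spaces and tabs at the ends of lines and changes nothing else, so the removed and added lines render identically. As an illustration only (a minimal sketch, not the tool used to produce this commit), a filter like the following applies the same transformation to whatever is piped through it:

#include <stdio.h>
#include <string.h>

/* Copy stdin to stdout, deleting trailing spaces and tabs from each
   line.  Lines are assumed to fit in the buffer; a real tool would
   also handle longer lines.  */
int
main (void)
{
  char line[4096];

  while (fgets (line, sizeof line, stdin) != NULL)
    {
      size_t len = strlen (line);
      int had_newline = (len > 0 && line[len - 1] == '\n');

      if (had_newline)
        len--;                  /* Set the newline aside.  */

      while (len > 0 && (line[len - 1] == ' ' || line[len - 1] == '\t'))
        len--;                  /* Drop the trailing whitespace.  */

      line[len] = '\0';
      fputs (line, stdout);
      if (had_newline)
        fputc ('\n', stdout);
    }

  return 0;
}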
The affected areas, file by file:

mf-heuristics.c: the license header and __mf_heuristic_check (the region-validation comment, the estimated stack range and its trace, the /proc/self/maps rescan, and the __mfu_register call for /proc/self/maps segments).
mf-hooks1.c: the feature-test macro block and the malloc, calloc, realloc, free, mmap, munmap and alloca (__mf_wrap_alloca_indirect) wrappers. These wrappers pad each heap allocation with crumple zones on both sides before registering it; a simplified sketch of that layout follows this entry.
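The sketch below is a simplification with invented names (CRUMPLE stands in for __mf_opts.crumple_zone, sketch_malloc for the real wrapper, and the __mf_register call is reduced to a comment); it only shows why the wrappers add two zones to the requested size and return a pointer offset past the first one:

#include <stdlib.h>

/* Simplified picture of the malloc wrapper: the caller's c bytes sit
   between two CRUMPLE-byte guard zones, and only the middle region
   would be registered as a valid object.  */
#define CRUMPLE 32              /* stands in for __mf_opts.crumple_zone */

static void *
sketch_malloc (size_t c)
{
  char *raw = malloc (c + 2 * CRUMPLE);   /* zone | user data | zone */
  if (raw == NULL)
    return NULL;

  /* The real wrapper calls __mf_register (raw + CRUMPLE, c, ...) here,
     so accesses that stray into either zone are flagged.  */
  return raw + CRUMPLE;
}

int
main (void)
{
  char *p = sketch_malloc (16);   /* usable 16-byte region */
  if (p != NULL)
    free (p - CRUMPLE);           /* free must step back to the raw base */
  return 0;
}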
mf-hooks2.c: the feature-test macro block and the strcpy, strcat, strncat, strcmp, strcasecmp, strdup, strndup, strstr and memmem wrappers, including the comment explaining why strncat validates strnlen(s,n) bytes rather than n.
mf-hooks3.c: the feature-test macro block, the per-thread bookkeeping (__mf_allocate_blank_threadinfo, __mf_find_threadinfo, __mf_state_perthread, __mf_pthread_cleanup, __mf_pthread_spawner), and the pthread_create and pthread_join wrappers, including the code that allocates and aligns the override thread stack.
mf-impl.h: the license header, the private-function declarations (__mf_violation, __mf_backtrace, __mf_heuristic_check), enum __mf_state_enum, several struct __mf_options fields, the violation-mode and __mf_dynamic_index enums, the __mf_state definition, and the comment describing the two wrapping methods (static linker wrapping via -wrap flags, and dynamic resolution through dlsym(RTLD_NEXT)); a generic sketch of the linker method follows this entry.
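The first of those wrapping methods is ordinary GNU ld symbol wrapping. As a generic illustration (not mudflap's actual hooks, which are generated by the WRAPPER and CALL_REAL macros), linking an object with -Wl,--wrap=malloc redirects its malloc calls into __wrap_malloc, which reaches the original allocator through __real_malloc:

#include <stdio.h>
#include <stdlib.h>

/* __real_malloc is supplied by the linker when --wrap=malloc is used;
   it names the untouched libc malloc.  */
void *__real_malloc (size_t size);

/* Calls to malloc in wrapped objects are rewritten to this function.  */
void *
__wrap_malloc (size_t size)
{
  void *p = __real_malloc (size);
  fprintf (stderr, "malloc (%zu) = %p\n", size, p);   /* bookkeeping hook */
  return p;
}

int
main (void)
{
  void *p = malloc (32);   /* routed through __wrap_malloc when built
                              with: gcc file.c -Wl,--wrap=malloc */
  free (p);
  return 0;
}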
mf-runtime.c: the feature-test macro block, the forward declarations and the options[] table, __mf_usage, __mf_set_options and __mfu_set_options, the dynamic-symbol resolution (__mf_resolve_single_dynamic, __mf_resolve_dynamics), __mfu_check, __mf_insert_new_object, __mf_uncache_object, __mfu_register, __mfu_unregister, the cache adaptation (__mf_adapt_cache_fn, __mf_adapt_cache), the object searches (__mf_find_objects2, __mf_find_dead_objects), __mf_describe_object, __mf_backtrace, __mf_violation, __mf_watch_or_not, write_itoa and __assert_fail, and the mfsplay_tree helpers.
mf-runtime.h: the __mf_check and __mf_register declarations and the #pragma redefine_extname block that renames the libc string and memory functions to their __mfwrap_ counterparts.