Reformat malloc to gnu style.

Ondřej Bílka 2014-01-02 09:38:18 +01:00
parent 9a3c6a6ff6
commit 6c8dbf00f5
18 changed files with 3843 additions and 3559 deletions

ChangeLog

@@ -1,3 +1,37 @@
2014-01-02 Ondřej Bílka <neleai@seznam.cz>
* malloc/arena.c (malloc_atfork, free_atfork, ptmalloc_lock_all,
ptmalloc_unlock_all, ptmalloc_unlock_all2, next_env_entry,
__failing_morecore, ptmalloc_init, dump_heap, new_heap, grow_heap,
heap_trim, _int_new_arena, get_free_list, reused_arena, arena_get2):
Convert to GNU style.
* malloc/hooks.c (memalign_hook_ini, __malloc_check_init,
mem2mem_check, mem2chunk_check, top_check, realloc_check,
memalign_check, __malloc_set_state): Likewise.
* malloc/mallocbug.c (main): Likewise.
* malloc/malloc.c (__malloc_assert, malloc_init_state, free_perturb,
do_check_malloced_chunk, do_check_malloc_state, sysmalloc, systrim,
mremap_chunk, __libc_malloc, __libc_free, __libc_realloc, _mid_memalign,
_int_malloc, malloc_consolidate, _int_realloc, _int_memalign, mtrim,
musable, __libc_mallopt, __posix_memalign, malloc_info): Likewise.
* malloc/malloc.h: Likewise.
* malloc/mcheck.c (checkhdr, unlink_blk, link_blk, freehook, mallochook,
memalignhook, reallochook, mabort): Likewise.
* malloc/mcheck.h: Likewise.
* malloc/memusage.c (update_data, me, malloc, realloc, calloc, free, mmap,
mmap64, mremap, munmap, dest): Likewise.
* malloc/memusagestat.c (main, parse_opt, more_help): Likewise.
* malloc/morecore.c (__default_morecore): Likewise.
* malloc/mtrace.c (tr_break, lock_and_info, mtrace): Likewise.
* malloc/obstack.c (_obstack_begin, _obstack_newchunk,
_obstack_allocated_p, obstack_free, _obstack_memory_used,
print_and_abort): Likewise.
* malloc/obstack.h: Likewise.
* malloc/set-freeres.c (__libc_freeres): Likewise.
* malloc/tst-mallocstate.c (main): Likewise.
* malloc/tst-mtrace.c (main): Likewise.
* malloc/tst-realloc.c (do_test): Likewise.
2014-01-02 Siddhesh Poyarekar <siddhesh@redhat.com>
[BZ #16366]
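Every hunk below pairs an old, compact line with its GNU-style replacement. The rules being applied are the standard GNU Coding Standards ones: a space before the opening parenthesis of calls and casts, the return type of a definition on its own line, braces on their own lines with bodies indented two columns, and spaces around binary operators. A minimal before/after sketch of the conversion (a made-up function, not taken from the patch):

#include <stdlib.h>

/* Before: compact layout, as in the old ptmalloc sources. */
static void *try_alloc_old(size_t sz) {
  void *p;
  if(sz == 0) return NULL;
  p = malloc(sz+1);
  return p;
}

/* After: the same function in GNU style. */
static void *
try_alloc_new (size_t sz)
{
  void *p;

  if (sz == 0)
    return NULL;

  p = malloc (sz + 1);
  return p;
}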

malloc/arena.c

@@ -21,12 +21,12 @@
/* Compile-time constants. */
#define HEAP_MIN_SIZE (32*1024)
#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
# define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
# define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif
@@ -39,7 +39,7 @@
#ifndef THREAD_STATS
#define THREAD_STATS 0
# define THREAD_STATS 0
#endif
/* If THREAD_STATS is non-zero, some statistics on mutex locking are
@@ -53,7 +53,8 @@
malloc_chunks. It is allocated with mmap() and always starts at an
address aligned to HEAP_MAX_SIZE. */
typedef struct _heap_info {
typedef struct _heap_info
{
mstate ar_ptr; /* Arena for this heap. */
struct _heap_info *prev; /* Previous heap. */
size_t size; /* Current size in bytes. */
@@ -80,9 +81,9 @@ static mstate free_list;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
# define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
# define THREAD_STAT(x) do ; while (0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
@@ -103,28 +104,28 @@ int __malloc_initialized = -1;
in the new arena. */
#define arena_get(ptr, size) do { \
arena_lookup(ptr); \
arena_lock(ptr, size); \
} while(0)
arena_lookup (ptr); \
arena_lock (ptr, size); \
} while (0)
#define arena_lookup(ptr) do { \
void *vptr = NULL; \
ptr = (mstate)tsd_getspecific(arena_key, vptr); \
} while(0)
ptr = (mstate) tsd_getspecific (arena_key, vptr); \
} while (0)
# define arena_lock(ptr, size) do { \
if(ptr) \
(void)mutex_lock(&ptr->mutex); \
#define arena_lock(ptr, size) do { \
if (ptr) \
(void) mutex_lock (&ptr->mutex); \
else \
ptr = arena_get2(ptr, (size), NULL); \
} while(0)
ptr = arena_get2 (ptr, (size), NULL); \
} while (0)
/* find the heap and corresponding arena for a given ptr */
#define heap_for_ptr(ptr) \
((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
(chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
(chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : &main_arena)
/**************************************************************************/
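The heap_for_ptr macro just above works only because of an invariant maintained by new_heap later in this file: every non-main heap is mmapped at an address aligned to HEAP_MAX_SIZE, so clearing the low bits of any chunk pointer yields the heap_info header at the start of its heap. A standalone sketch of that arithmetic, using the 1 MiB fallback value of HEAP_MAX_SIZE and a made-up address (not glibc code):

#include <stdint.h>
#include <stdio.h>

#define HEAP_MAX_SIZE (1024 * 1024) /* fallback value from the hunk above */

int
main (void)
{
  uintptr_t heap_base = 0x7f0000100000UL; /* pretend mmap returned this */
  uintptr_t chunk = heap_base + 0x4321;   /* some chunk inside the heap */
  /* heap_for_ptr: mask off the low bits to recover the heap base. */
  uintptr_t heap = chunk & ~(uintptr_t) (HEAP_MAX_SIZE - 1);
  printf ("chunk %#lx lives in heap %#lx\n",
          (unsigned long) chunk, (unsigned long) heap);
  return heap == heap_base ? 0 : 1;
}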
@@ -133,51 +134,58 @@ int __malloc_initialized = -1;
/* atfork support. */
static void *(*save_malloc_hook) (size_t __size, const void *);
static void *(*save_malloc_hook)(size_t __size, const void *);
static void (*save_free_hook) (void *__ptr, const void *);
static void *save_arena;
#ifdef ATFORK_MEM
# ifdef ATFORK_MEM
ATFORK_MEM;
#endif
# endif
/* Magic value for the thread-specific arena pointer when
malloc_atfork() is in use. */
#define ATFORK_ARENA_PTR ((void*)-1)
# define ATFORK_ARENA_PTR ((void *) -1)
/* The following hooks are used while the `atfork' handling mechanism
is active. */
static void*
malloc_atfork(size_t sz, const void *caller)
static void *
malloc_atfork (size_t sz, const void *caller)
{
void *vptr = NULL;
void *victim;
tsd_getspecific(arena_key, vptr);
if(vptr == ATFORK_ARENA_PTR) {
tsd_getspecific (arena_key, vptr);
if (vptr == ATFORK_ARENA_PTR)
{
/* We are the only thread that may allocate at all. */
if(save_malloc_hook != malloc_check) {
return _int_malloc(&main_arena, sz);
} else {
if(top_check()<0)
return 0;
victim = _int_malloc(&main_arena, sz+1);
return mem2mem_check(victim, sz);
if (save_malloc_hook != malloc_check)
{
return _int_malloc (&main_arena, sz);
}
} else {
else
{
if (top_check () < 0)
return 0;
victim = _int_malloc (&main_arena, sz + 1);
return mem2mem_check (victim, sz);
}
}
else
{
/* Suspend the thread until the `atfork' handlers have completed.
By that time, the hooks will have been reset as well, so that
mALLOc() can be used again. */
(void)mutex_lock(&list_lock);
(void)mutex_unlock(&list_lock);
return __libc_malloc(sz);
(void) mutex_lock (&list_lock);
(void) mutex_unlock (&list_lock);
return __libc_malloc (sz);
}
}
static void
free_atfork(void* mem, const void *caller)
free_atfork (void *mem, const void *caller)
{
void *vptr = NULL;
mstate ar_ptr;
@@ -186,17 +194,17 @@ free_atfork(void* mem, const void *caller)
if (mem == 0) /* free(0) has no effect */
return;
p = mem2chunk(mem); /* do not bother to replicate free_check here */
p = mem2chunk (mem); /* do not bother to replicate free_check here */
if (chunk_is_mmapped(p)) /* release mmapped memory. */
if (chunk_is_mmapped (p)) /* release mmapped memory. */
{
munmap_chunk(p);
munmap_chunk (p);
return;
}
ar_ptr = arena_for_chunk(p);
tsd_getspecific(arena_key, vptr);
_int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
ar_ptr = arena_for_chunk (p);
tsd_getspecific (arena_key, vptr);
_int_free (ar_ptr, p, vptr == ATFORK_ARENA_PTR);
}
@@ -214,33 +222,36 @@ ptmalloc_lock_all (void)
{
mstate ar_ptr;
if(__malloc_initialized < 1)
if (__malloc_initialized < 1)
return;
if (mutex_trylock(&list_lock))
if (mutex_trylock (&list_lock))
{
void *my_arena;
tsd_getspecific(arena_key, my_arena);
tsd_getspecific (arena_key, my_arena);
if (my_arena == ATFORK_ARENA_PTR)
/* This is the same thread which already locks the global list.
Just bump the counter. */
goto out;
/* This thread has to wait its turn. */
(void)mutex_lock(&list_lock);
(void) mutex_lock (&list_lock);
}
for(ar_ptr = &main_arena;;) {
(void)mutex_lock(&ar_ptr->mutex);
for (ar_ptr = &main_arena;; )
{
(void) mutex_lock (&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
if (ar_ptr == &main_arena)
break;
}
save_malloc_hook = __malloc_hook;
save_free_hook = __free_hook;
__malloc_hook = malloc_atfork;
__free_hook = free_atfork;
/* Only the current thread may perform malloc/free calls now. */
tsd_getspecific(arena_key, save_arena);
tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
out:
tsd_getspecific (arena_key, save_arena);
tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
out:
++atfork_recursive_cntr;
}
@@ -249,19 +260,23 @@ ptmalloc_unlock_all (void)
{
mstate ar_ptr;
if(__malloc_initialized < 1)
if (__malloc_initialized < 1)
return;
if (--atfork_recursive_cntr != 0)
return;
tsd_setspecific(arena_key, save_arena);
tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
for(ar_ptr = &main_arena;;) {
(void)mutex_unlock(&ar_ptr->mutex);
for (ar_ptr = &main_arena;; )
{
(void) mutex_unlock (&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
if (ar_ptr == &main_arena)
break;
}
(void)mutex_unlock(&list_lock);
(void) mutex_unlock (&list_lock);
}
# ifdef __linux__
@@ -276,31 +291,33 @@ ptmalloc_unlock_all2 (void)
{
mstate ar_ptr;
if(__malloc_initialized < 1)
if (__malloc_initialized < 1)
return;
tsd_setspecific(arena_key, save_arena);
tsd_setspecific (arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
free_list = NULL;
for(ar_ptr = &main_arena;;) {
mutex_init(&ar_ptr->mutex);
if (ar_ptr != save_arena) {
for (ar_ptr = &main_arena;; )
{
mutex_init (&ar_ptr->mutex);
if (ar_ptr != save_arena)
{
ar_ptr->next_free = free_list;
free_list = ar_ptr;
}
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
if (ar_ptr == &main_arena)
break;
}
mutex_init(&list_lock);
mutex_init (&list_lock);
atfork_recursive_cntr = 0;
}
# else
# define ptmalloc_unlock_all2 ptmalloc_unlock_all
# endif
#endif /* !NO_THREADS */
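The handlers above exist because fork() in a multithreaded process duplicates the address space while other threads may hold arena mutexes: ptmalloc_lock_all quiesces every arena before the kernel copies memory, ptmalloc_unlock_all releases the locks in the parent, and on Linux ptmalloc_unlock_all2 reinitializes them in the child, where only the forking thread survives. The same shape with the public pthread_atfork interface, reduced to a single lock as a hedged sketch (glibc itself uses its internal thread_atfork wrapper over the whole arena list):

#include <pthread.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prepare (void)       /* cf. ptmalloc_lock_all */
{
  pthread_mutex_lock (&alloc_lock);
}

static void
parent (void)        /* cf. ptmalloc_unlock_all */
{
  pthread_mutex_unlock (&alloc_lock);
}

static void
child (void)         /* cf. ptmalloc_unlock_all2: reinitialize, don't unlock */
{
  pthread_mutex_init (&alloc_lock, NULL);
}

static void __attribute__ ((constructor))
install_fork_handlers (void)
{
  pthread_atfork (prepare, parent, child);
}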
/* Initialization routine. */
@@ -353,7 +370,9 @@ libc_hidden_proto (_dl_open_hook);
static void
ptmalloc_init (void)
{
if(__malloc_initialized >= 0) return;
if (__malloc_initialized >= 0)
return;
__malloc_initialized = 0;
#ifdef SHARED
@@ -368,9 +387,9 @@ ptmalloc_init (void)
__morecore = __failing_morecore;
#endif
tsd_key_create(&arena_key, NULL);
tsd_setspecific(arena_key, (void *)&main_arena);
thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
tsd_key_create (&arena_key, NULL);
tsd_setspecific (arena_key, (void *) &main_arena);
thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
const char *s = NULL;
if (__builtin_expect (_environ != NULL, 1))
{
@@ -395,37 +414,37 @@ ptmalloc_init (void)
s = &envline[7];
break;
case 8:
if (! __builtin_expect (__libc_enable_secure, 0))
if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TOP_PAD_", 8) == 0)
__libc_mallopt(M_TOP_PAD, atoi(&envline[9]));
__libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
else if (memcmp (envline, "PERTURB_", 8) == 0)
__libc_mallopt(M_PERTURB, atoi(&envline[9]));
__libc_mallopt (M_PERTURB, atoi (&envline[9]));
}
break;
case 9:
if (! __builtin_expect (__libc_enable_secure, 0))
if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "MMAP_MAX_", 9) == 0)
__libc_mallopt(M_MMAP_MAX, atoi(&envline[10]));
__libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
else if (memcmp (envline, "ARENA_MAX", 9) == 0)
__libc_mallopt(M_ARENA_MAX, atoi(&envline[10]));
__libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
}
break;
case 10:
if (! __builtin_expect (__libc_enable_secure, 0))
if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "ARENA_TEST", 10) == 0)
__libc_mallopt(M_ARENA_TEST, atoi(&envline[11]));
__libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
}
break;
case 15:
if (! __builtin_expect (__libc_enable_secure, 0))
if (!__builtin_expect (__libc_enable_secure, 0))
{
if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
__libc_mallopt(M_TRIM_THRESHOLD, atoi(&envline[16]));
__libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
__libc_mallopt(M_MMAP_THRESHOLD, atoi(&envline[16]));
__libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
}
break;
default:
@@ -433,10 +452,11 @@ ptmalloc_init (void)
}
}
}
if(s && s[0]) {
__libc_mallopt(M_CHECK_ACTION, (int)(s[0] - '0'));
if (s && s[0])
{
__libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
if (check_action != 0)
__malloc_check_init();
__malloc_check_init ();
}
void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
if (hook != NULL)
@@ -446,11 +466,11 @@ ptmalloc_init (void)
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
thread_atfork_static (ptmalloc_lock_all, ptmalloc_unlock_all, \
ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */
@@ -459,30 +479,33 @@ thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
/* Print the complete contents of a single heap to stderr. */
static void
dump_heap(heap_info *heap)
dump_heap (heap_info *heap)
{
char *ptr;
mchunkptr p;
fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
(char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
(char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
~MALLOC_ALIGN_MASK);
for(;;) {
fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
if(p == top(heap->ar_ptr)) {
fprintf(stderr, " (top)\n");
break;
} else if(p->size == (0|PREV_INUSE)) {
fprintf(stderr, " (fence)\n");
for (;; )
{
fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
if (p == top (heap->ar_ptr))
{
fprintf (stderr, " (top)\n");
break;
}
fprintf(stderr, "\n");
p = next_chunk(p);
else if (p->size == (0 | PREV_INUSE))
{
fprintf (stderr, " (fence)\n");
break;
}
fprintf (stderr, "\n");
p = next_chunk (p);
}
}
#endif /* MALLOC_DEBUG > 1 */
/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
@@ -500,18 +523,18 @@ static char *aligned_heap_area;
static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
new_heap (size_t size, size_t top_pad)
{
size_t page_mask = GLRO(dl_pagesize) - 1;
size_t page_mask = GLRO (dl_pagesize) - 1;
char *p1, *p2;
unsigned long ul;
heap_info *h;
if(size+top_pad < HEAP_MIN_SIZE)
if (size + top_pad < HEAP_MIN_SIZE)
size = HEAP_MIN_SIZE;
else if(size+top_pad <= HEAP_MAX_SIZE)
else if (size + top_pad <= HEAP_MAX_SIZE)
size += top_pad;
else if(size > HEAP_MAX_SIZE)
else if (size > HEAP_MAX_SIZE)
return 0;
else
size = HEAP_MAX_SIZE;
@@ -522,46 +545,55 @@ new_heap(size_t size, size_t top_pad)
mapping (on Linux, this is the case for all non-writable mappings
anyway). */
p2 = MAP_FAILED;
if(aligned_heap_area) {
p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
if (aligned_heap_area)
{
p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
MAP_NORESERVE);
aligned_heap_area = NULL;
if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
__munmap(p2, HEAP_MAX_SIZE);
if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
{
__munmap (p2, HEAP_MAX_SIZE);
p2 = MAP_FAILED;
}
}
if(p2 == MAP_FAILED) {
p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
if(p1 != MAP_FAILED) {
p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
& ~(HEAP_MAX_SIZE-1));
if (p2 == MAP_FAILED)
{
p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
if (p1 != MAP_FAILED)
{
p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
& ~(HEAP_MAX_SIZE - 1));
ul = p2 - p1;
if (ul)
__munmap(p1, ul);
__munmap (p1, ul);
else
aligned_heap_area = p2 + HEAP_MAX_SIZE;
__munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
} else {
__munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
}
else
{
/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
is already aligned. */
p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
if(p2 == MAP_FAILED)
p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
if (p2 == MAP_FAILED)
return 0;
if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
__munmap(p2, HEAP_MAX_SIZE);
if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
{
__munmap (p2, HEAP_MAX_SIZE);
return 0;
}
}
}
if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
__munmap(p2, HEAP_MAX_SIZE);
if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
{
__munmap (p2, HEAP_MAX_SIZE);
return 0;
}
h = (heap_info *)p2;
h = (heap_info *) p2;
h->size = size;
h->mprotect_size = size;
THREAD_STAT(stat_n_heaps++);
THREAD_STAT (stat_n_heaps++);
LIBC_PROBE (memory_heap_new, 2, h, h->size);
return h;
}
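The comment introducing new_heap describes the trick this hunk implements: mmap guarantees no particular alignment, so the code reserves HEAP_MAX_SIZE << 1 bytes, rounds the start up to the next HEAP_MAX_SIZE boundary, and returns the unused head and tail to the kernel (stashing the tail address in aligned_heap_area when the mapping was already aligned, so the next heap can try it directly). A self-contained sketch of the same over-allocate-and-trim technique with plain mmap; the constant and names are illustrative, not the glibc code path:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define ALIGNMENT (1UL << 20) /* stand-in for HEAP_MAX_SIZE */

/* Map ALIGNMENT bytes starting at a multiple of ALIGNMENT. */
static void *
mmap_aligned (void)
{
  char *p1 = mmap (NULL, ALIGNMENT << 1, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p1 == MAP_FAILED)
    return NULL;

  char *p2 = (char *) (((uintptr_t) p1 + (ALIGNMENT - 1))
                       & ~(ALIGNMENT - 1));
  size_t head = p2 - p1;

  if (head)                                  /* trim the unaligned prefix */
    munmap (p1, head);
  munmap (p2 + ALIGNMENT, ALIGNMENT - head); /* trim the tail */
  return p2;
}

int
main (void)
{
  void *p = mmap_aligned ();
  printf ("%p aligned=%d\n", p,
          p != NULL && ((uintptr_t) p & (ALIGNMENT - 1)) == 0);
  return 0;
}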
@@ -570,20 +602,23 @@ new_heap(size_t size, size_t top_pad)
multiple of the page size. */
static int
grow_heap(heap_info *h, long diff)
grow_heap (heap_info *h, long diff)
{
size_t page_mask = GLRO(dl_pagesize) - 1;
size_t page_mask = GLRO (dl_pagesize) - 1;
long new_size;
diff = (diff + page_mask) & ~page_mask;
new_size = (long)h->size + diff;
if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
new_size = (long) h->size + diff;
if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
return -1;
if((unsigned long) new_size > h->mprotect_size) {
if (__mprotect((char *)h + h->mprotect_size,
if ((unsigned long) new_size > h->mprotect_size)
{
if (__mprotect ((char *) h + h->mprotect_size,
(unsigned long) new_size - h->mprotect_size,
PROT_READ|PROT_WRITE) != 0)
PROT_READ | PROT_WRITE) != 0)
return -2;
h->mprotect_size = new_size;
}
@@ -595,24 +630,26 @@ grow_heap(heap_info *h, long diff)
/* Shrink a heap. */
static int
shrink_heap(heap_info *h, long diff)
shrink_heap (heap_info *h, long diff)
{
long new_size;
new_size = (long)h->size - diff;
if(new_size < (long)sizeof(*h))
new_size = (long) h->size - diff;
if (new_size < (long) sizeof (*h))
return -1;
/* Try to re-map the extra heap space freshly to save memory, and make it
inaccessible. See malloc-sysdep.h to know when this is true. */
if (__builtin_expect (check_may_shrink_heap (), 0))
{
if((char *)MMAP((char *)h + new_size, diff, PROT_NONE,
if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
MAP_FIXED) == (char *) MAP_FAILED)
return -2;
h->mprotect_size = new_size;
}
else
__madvise ((char *)h + new_size, diff, MADV_DONTNEED);
__madvise ((char *) h + new_size, diff, MADV_DONTNEED);
/*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
h->size = new_size;
@@ -624,65 +661,69 @@ shrink_heap(heap_info *h, long diff)
#define delete_heap(heap) \
do { \
if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area) \
aligned_heap_area = NULL; \
__munmap((char*)(heap), HEAP_MAX_SIZE); \
__munmap ((char *) (heap), HEAP_MAX_SIZE); \
} while (0)
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
heap_trim (heap_info *heap, size_t pad)
{
mstate ar_ptr = heap->ar_ptr;
unsigned long pagesz = GLRO(dl_pagesize);
mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
unsigned long pagesz = GLRO (dl_pagesize);
mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
heap_info *prev_heap;
long new_size, top_size, extra, prev_size, misalign;
/* Can this heap go away completely? */
while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
{
prev_heap = heap->prev;
prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
p = chunk_at_offset(prev_heap, prev_size);
prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
p = chunk_at_offset (prev_heap, prev_size);
/* fencepost must be properly aligned. */
misalign = ((long) p) & MALLOC_ALIGN_MASK;
p = chunk_at_offset(prev_heap, prev_size - misalign);
assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
p = prev_chunk(p);
new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
assert(new_size>0 && new_size<(long)(2*MINSIZE));
if(!prev_inuse(p))
p = chunk_at_offset (prev_heap, prev_size - misalign);
assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
p = prev_chunk (p);
new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
if (!prev_inuse (p))
new_size += p->prev_size;
assert(new_size>0 && new_size<HEAP_MAX_SIZE);
if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
break;
ar_ptr->system_mem -= heap->size;
arena_mem -= heap->size;
LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
delete_heap(heap);
delete_heap (heap);
heap = prev_heap;
if(!prev_inuse(p)) { /* consolidate backward */
p = prev_chunk(p);
unlink(p, bck, fwd);
if (!prev_inuse (p)) /* consolidate backward */
{
p = prev_chunk (p);
unlink (p, bck, fwd);
}
assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
top(ar_ptr) = top_chunk = p;
set_head(top_chunk, new_size | PREV_INUSE);
assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
assert (((char *) p + new_size) == ((char *) heap + heap->size));
top (ar_ptr) = top_chunk = p;
set_head (top_chunk, new_size | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
}
top_size = chunksize(top_chunk);
top_size = chunksize (top_chunk);
extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
if(extra < (long)pagesz)
if (extra < (long) pagesz)
return 0;
/* Try to shrink. */
if(shrink_heap(heap, extra) != 0)
if (shrink_heap (heap, extra) != 0)
return 0;
ar_ptr->system_mem -= extra;
arena_mem -= extra;
/* Success. Adjust top accordingly. */
set_head(top_chunk, (top_size - extra) | PREV_INUSE);
set_head (top_chunk, (top_size - extra) | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
return 1;
}
@@ -690,52 +731,53 @@ heap_trim(heap_info *heap, size_t pad)
/* Create a new arena with initial size "size". */
static mstate
_int_new_arena(size_t size)
_int_new_arena (size_t size)
{
mstate a;
heap_info *h;
char *ptr;
unsigned long misalign;
h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
mp_.top_pad);
if(!h) {
if (!h)
{
/* Maybe size is too large to fit in a single heap. So, just try
to create a minimally-sized arena and let _int_malloc() attempt
to deal with the large request via mmap_chunk(). */
h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
if(!h)
h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
if (!h)
return 0;
}
a = h->ar_ptr = (mstate)(h+1);
malloc_init_state(a);
a = h->ar_ptr = (mstate) (h + 1);
malloc_init_state (a);
/*a->next = NULL;*/
a->system_mem = a->max_system_mem = h->size;
arena_mem += h->size;
/* Set up the top chunk, with proper alignment. */
ptr = (char *)(a + 1);
misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
ptr = (char *) (a + 1);
misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
if (misalign > 0)
ptr += MALLOC_ALIGNMENT - misalign;
top(a) = (mchunkptr)ptr;
set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
top (a) = (mchunkptr) ptr;
set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
LIBC_PROBE (memory_arena_new, 2, a, size);
tsd_setspecific(arena_key, (void *)a);
mutex_init(&a->mutex);
(void)mutex_lock(&a->mutex);
tsd_setspecific (arena_key, (void *) a);
mutex_init (&a->mutex);
(void) mutex_lock (&a->mutex);
(void)mutex_lock(&list_lock);
(void) mutex_lock (&list_lock);
/* Add the new arena to the global list. */
a->next = main_arena.next;
atomic_write_barrier ();
main_arena.next = a;
(void)mutex_unlock(&list_lock);
(void) mutex_unlock (&list_lock);
THREAD_STAT(++(a->stat_lock_loop));
THREAD_STAT (++(a->stat_lock_loop));
return a;
}
@@ -747,18 +789,18 @@ get_free_list (void)
mstate result = free_list;
if (result != NULL)
{
(void)mutex_lock(&list_lock);
(void) mutex_lock (&list_lock);
result = free_list;
if (result != NULL)
free_list = result->next_free;
(void)mutex_unlock(&list_lock);
(void) mutex_unlock (&list_lock);
if (result != NULL)
{
LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
(void)mutex_lock(&result->mutex);
tsd_setspecific(arena_key, (void *)result);
THREAD_STAT(++(result->stat_lock_loop));
(void) mutex_lock (&result->mutex);
tsd_setspecific (arena_key, (void *) result);
THREAD_STAT (++(result->stat_lock_loop));
}
}
@@ -779,7 +821,7 @@ reused_arena (mstate avoid_arena)
result = next_to_use;
do
{
if (!mutex_trylock(&result->mutex))
if (!mutex_trylock (&result->mutex))
goto out;
result = result->next;
@@ -793,12 +835,12 @@ reused_arena (mstate avoid_arena)
/* No arena available. Wait for the next in line. */
LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
(void)mutex_lock(&result->mutex);
(void) mutex_lock (&result->mutex);
out:
out:
LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
tsd_setspecific(arena_key, (void *)result);
THREAD_STAT(++(result->stat_lock_loop));
tsd_setspecific (arena_key, (void *) result);
THREAD_STAT (++(result->stat_lock_loop));
next_to_use = result->next;
return result;
@@ -806,7 +848,7 @@ reused_arena (mstate avoid_arena)
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
{
mstate a;
@@ -863,15 +905,18 @@ static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
if(ar_ptr != &main_arena) {
(void)mutex_unlock(&ar_ptr->mutex);
if (ar_ptr != &main_arena)
{
(void) mutex_unlock (&ar_ptr->mutex);
ar_ptr = &main_arena;
(void)mutex_lock(&ar_ptr->mutex);
} else {
(void) mutex_lock (&ar_ptr->mutex);
}
else
{
/* Grab ar_ptr->next prior to releasing its lock. */
mstate prev = ar_ptr->next ? ar_ptr : 0;
(void)mutex_unlock(&ar_ptr->mutex);
ar_ptr = arena_get2(prev, bytes, ar_ptr);
(void) mutex_unlock (&ar_ptr->mutex);
ar_ptr = arena_get2 (prev, bytes, ar_ptr);
}
return ar_ptr;
@@ -881,15 +926,15 @@ static void __attribute__ ((section ("__libc_thread_freeres_fn")))
arena_thread_freeres (void)
{
void *vptr = NULL;
mstate a = tsd_getspecific(arena_key, vptr);
tsd_setspecific(arena_key, NULL);
mstate a = tsd_getspecific (arena_key, vptr);
tsd_setspecific (arena_key, NULL);
if (a != NULL)
{
(void)mutex_lock(&list_lock);
(void) mutex_lock (&list_lock);
a->next_free = free_list;
free_list = a;
(void)mutex_unlock(&list_lock);
(void) mutex_unlock (&list_lock);
}
}
text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
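arena_thread_freeres and get_free_list (earlier in this file) form a simple producer/consumer pair: a thread that exits donates its arena to free_list under list_lock, and the next thread that needs an arena takes one from there before falling back to _int_new_arena. Reduced to its core as a sketch (hypothetical arena type, a pthread mutex standing in for glibc's internal one):

#include <pthread.h>
#include <stddef.h>

struct arena { struct arena *next_free; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct arena *free_list;

static void
arena_release (struct arena *a)   /* cf. arena_thread_freeres */
{
  pthread_mutex_lock (&list_lock);
  a->next_free = free_list;
  free_list = a;
  pthread_mutex_unlock (&list_lock);
}

static struct arena *
arena_acquire (void)              /* cf. get_free_list */
{
  pthread_mutex_lock (&list_lock);
  struct arena *a = free_list;
  if (a != NULL)
    free_list = a->next_free;
  pthread_mutex_unlock (&list_lock);
  return a; /* NULL means the caller must create a fresh arena */
}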

malloc/hooks.c

@@ -24,29 +24,29 @@
/* Hooks for debugging versions. The initial hooks just call the
initialization routine, then do the normal work. */
static void*
malloc_hook_ini(size_t sz, const void *caller)
static void *
malloc_hook_ini (size_t sz, const void *caller)
{
__malloc_hook = NULL;
ptmalloc_init();
return __libc_malloc(sz);
ptmalloc_init ();
return __libc_malloc (sz);
}
static void*
realloc_hook_ini(void* ptr, size_t sz, const void *caller)
static void *
realloc_hook_ini (void *ptr, size_t sz, const void *caller)
{
__malloc_hook = NULL;
__realloc_hook = NULL;
ptmalloc_init();
return __libc_realloc(ptr, sz);
ptmalloc_init ();
return __libc_realloc (ptr, sz);
}
static void*
memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
static void *
memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
{
__memalign_hook = NULL;
ptmalloc_init();
return __libc_memalign(alignment, sz);
ptmalloc_init ();
return __libc_memalign (alignment, sz);
}
/* Whether we are using malloc checking. */
@@ -71,7 +71,8 @@ static int disallow_malloc_check;
void
__malloc_check_init (void)
{
if (disallow_malloc_check) {
if (disallow_malloc_check)
{
disallow_malloc_check = 0;
return;
}
@@ -87,7 +88,7 @@ __malloc_check_init (void)
overruns. The goal here is to avoid obscure crashes due to invalid
usage, unlike in the MALLOC_DEBUG code. */
#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
#define MAGICBYTE(p) ((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
highest address of the chunk, downwards. The beginning of each block tells
@@ -96,53 +97,58 @@ __malloc_check_init (void)
must reach it with this iteration, otherwise we have witnessed a memory
corruption. */
static size_t
malloc_check_get_size(mchunkptr p)
malloc_check_get_size (mchunkptr p)
{
size_t size;
unsigned char c;
unsigned char magic = MAGICBYTE(p);
unsigned char magic = MAGICBYTE (p);
assert(using_malloc_checking == 1);
assert (using_malloc_checking == 1);
for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
(c = ((unsigned char*)p)[size]) != magic;
size -= c) {
if(c<=0 || size<(c+2*SIZE_SZ)) {
malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
chunk2mem(p));
for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
(c = ((unsigned char *) p)[size]) != magic;
size -= c)
{
if (c <= 0 || size < (c + 2 * SIZE_SZ))
{
malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
chunk2mem (p));
return 0;
}
}
/* chunk2mem size. */
return size - 2*SIZE_SZ;
return size - 2 * SIZE_SZ;
}
/* Instrument a chunk with overrun detector byte(s) and convert it
into a user pointer with requested size sz. */
static void*
static void *
internal_function
mem2mem_check(void *ptr, size_t sz)
mem2mem_check (void *ptr, size_t sz)
{
mchunkptr p;
unsigned char* m_ptr = ptr;
unsigned char *m_ptr = ptr;
size_t i;
if (!ptr)
return ptr;
p = mem2chunk(ptr);
for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
p = mem2chunk (ptr);
for (i = chunksize (p) - (chunk_is_mmapped (p) ? 2 * SIZE_SZ + 1 : SIZE_SZ + 1);
i > sz;
i -= 0xFF) {
if(i-sz < 0x100) {
m_ptr[i] = (unsigned char)(i-sz);
i -= 0xFF)
{
if (i - sz < 0x100)
{
m_ptr[i] = (unsigned char) (i - sz);
break;
}
m_ptr[i] = 0xFF;
}
m_ptr[sz] = MAGICBYTE(p);
return (void*)m_ptr;
m_ptr[sz] = MAGICBYTE (p);
return (void *) m_ptr;
}
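mem2mem_check encodes the requested size into the slack at the top of the chunk: walking down from the last byte it writes 0xFF "hop" marks every 255 bytes, then the remaining distance, and finally the address-derived magic byte just past the user data. malloc_check_get_size (above) and mem2chunk_check (below) recover the size by following the marks until they hit the magic byte, flagging corruption if a mark is implausible. A toy model of the encoding, with a fixed MAGIC standing in for MAGICBYTE(p):

#include <stdio.h>

#define MAGIC 0xA5 /* stand-in for the address-derived MAGICBYTE (p) */

/* Write the trailer marks for SZ user bytes in a buffer of CAP bytes. */
static void
mark (unsigned char *buf, size_t cap, size_t sz)
{
  size_t i;
  for (i = cap - 1; i > sz; i -= 0xFF)
    {
      if (i - sz < 0x100)
        {
          buf[i] = (unsigned char) (i - sz); /* distance to the magic byte */
          break;
        }
      buf[i] = 0xFF; /* full-stride hop mark */
    }
  buf[sz] = MAGIC;
}

/* Follow the marks downward until the magic byte; its index is the size. */
static size_t
recover (const unsigned char *buf, size_t cap)
{
  size_t i;
  unsigned char c;
  for (i = cap - 1; (c = buf[i]) != MAGIC; i -= c)
    ;
  return i;
}

int
main (void)
{
  unsigned char buf[1024];
  mark (buf, sizeof buf, 100);
  printf ("recovered size: %zu\n", recover (buf, sizeof buf)); /* 100 */
  return 0;
}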
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
@@ -150,53 +156,64 @@ mem2mem_check(void *ptr, size_t sz)
static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
mem2chunk_check (void *mem, unsigned char **magic_p)
{
mchunkptr p;
INTERNAL_SIZE_T sz, c;
unsigned char magic;
if(!aligned_OK(mem)) return NULL;
p = mem2chunk(mem);
if (!chunk_is_mmapped(p)) {
/* Must be a chunk in conventional heap memory. */
int contig = contiguous(&main_arena);
sz = chunksize(p);
if((contig &&
((char*)p<mp_.sbrk_base ||
((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
(contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
next_chunk(prev_chunk(p))!=p) ))
if (!aligned_OK (mem))
return NULL;
p = mem2chunk (mem);
if (!chunk_is_mmapped (p))
{
/* Must be a chunk in conventional heap memory. */
int contig = contiguous (&main_arena);
sz = chunksize (p);
if ((contig &&
((char *) p < mp_.sbrk_base ||
((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
(!prev_inuse (p) && (p->prev_size & MALLOC_ALIGN_MASK ||
(contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
next_chunk (prev_chunk (p)) != p)))
return NULL;
magic = MAGICBYTE (p);
for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
{
if (c <= 0 || sz < (c + 2 * SIZE_SZ))
return NULL;
magic = MAGICBYTE(p);
for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
} else {
unsigned long offset, page_mask = GLRO(dl_pagesize)-1;
}
else
{
unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;
/* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
alignment relative to the beginning of a page. Check this
first. */
offset = (unsigned long)mem & page_mask;
if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
offset<0x2000) ||
!chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
offset = (unsigned long) mem & page_mask;
if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
offset < 0x2000) ||
!chunk_is_mmapped (p) || (p->size & PREV_INUSE) ||
((((unsigned long) p - p->prev_size) & page_mask) != 0) ||
((sz = chunksize (p)), ((p->prev_size + sz) & page_mask) != 0))
return NULL;
magic = MAGICBYTE (p);
for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
{
if (c <= 0 || sz < (c + 2 * SIZE_SZ))
return NULL;
magic = MAGICBYTE(p);
for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
}
((unsigned char*)p)[sz] ^= 0xFF;
((unsigned char *) p)[sz] ^= 0xFF;
if (magic_p)
*magic_p = (unsigned char *)p + sz;
*magic_p = (unsigned char *) p + sz;
return p;
}
@@ -205,32 +222,32 @@ mem2chunk_check(void* mem, unsigned char **magic_p)
static int
internal_function
top_check(void)
top_check (void)
{
mchunkptr t = top(&main_arena);
char* brk, * new_brk;
mchunkptr t = top (&main_arena);
char *brk, *new_brk;
INTERNAL_SIZE_T front_misalign, sbrk_size;
unsigned long pagesz = GLRO(dl_pagesize);
unsigned long pagesz = GLRO (dl_pagesize);
if (t == initial_top(&main_arena) ||
(!chunk_is_mmapped(t) &&
chunksize(t)>=MINSIZE &&
prev_inuse(t) &&
(!contiguous(&main_arena) ||
(char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
if (t == initial_top (&main_arena) ||
(!chunk_is_mmapped (t) &&
chunksize (t) >= MINSIZE &&
prev_inuse (t) &&
(!contiguous (&main_arena) ||
(char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
return 0;
malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
/* Try to set up a new top chunk. */
brk = MORECORE(0);
front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
brk = MORECORE (0);
front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
front_misalign = MALLOC_ALIGNMENT - front_misalign;
sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
new_brk = (char*)(MORECORE (sbrk_size));
if (new_brk == (char*)(MORECORE_FAILURE))
sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
new_brk = (char *) (MORECORE (sbrk_size));
if (new_brk == (char *) (MORECORE_FAILURE))
{
__set_errno (ENOMEM);
return -1;
@@ -238,128 +255,148 @@ top_check(void)
/* Call the `morecore' hook if necessary. */
void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
if (hook)
(*hook) ();
(*hook)();
main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
top(&main_arena) = (mchunkptr)(brk + front_misalign);
set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
top (&main_arena) = (mchunkptr) (brk + front_misalign);
set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
return 0;
}
static void*
malloc_check(size_t sz, const void *caller)
static void *
malloc_check (size_t sz, const void *caller)
{
void *victim;
if (sz+1 == 0) {
if (sz + 1 == 0)
{
__set_errno (ENOMEM);
return NULL;
}
(void)mutex_lock(&main_arena.mutex);
victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(victim, sz);
(void) mutex_lock (&main_arena.mutex);
victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
(void) mutex_unlock (&main_arena.mutex);
return mem2mem_check (victim, sz);
}
static void
free_check(void* mem, const void *caller)
free_check (void *mem, const void *caller)
{
mchunkptr p;
if(!mem) return;
(void)mutex_lock(&main_arena.mutex);
p = mem2chunk_check(mem, NULL);
if(!p) {
(void)mutex_unlock(&main_arena.mutex);
if (!mem)
return;
malloc_printerr(check_action, "free(): invalid pointer", mem);
(void) mutex_lock (&main_arena.mutex);
p = mem2chunk_check (mem, NULL);
if (!p)
{
(void) mutex_unlock (&main_arena.mutex);
malloc_printerr (check_action, "free(): invalid pointer", mem);
return;
}
if (chunk_is_mmapped(p)) {
(void)mutex_unlock(&main_arena.mutex);
munmap_chunk(p);
if (chunk_is_mmapped (p))
{
(void) mutex_unlock (&main_arena.mutex);
munmap_chunk (p);
return;
}
_int_free(&main_arena, p, 1);
(void)mutex_unlock(&main_arena.mutex);
_int_free (&main_arena, p, 1);
(void) mutex_unlock (&main_arena.mutex);
}
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
static void *
realloc_check (void *oldmem, size_t bytes, const void *caller)
{
INTERNAL_SIZE_T nb;
void* newmem = 0;
void *newmem = 0;
unsigned char *magic_p;
if (bytes+1 == 0) {
if (bytes + 1 == 0)
{
__set_errno (ENOMEM);
return NULL;
}
if (oldmem == 0) return malloc_check(bytes, NULL);
if (bytes == 0) {
if (oldmem == 0)
return malloc_check (bytes, NULL);
if (bytes == 0)
{
free_check (oldmem, NULL);
return NULL;
}
(void)mutex_lock(&main_arena.mutex);
const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
(void)mutex_unlock(&main_arena.mutex);
if(!oldp) {
malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
return malloc_check(bytes, NULL);
(void) mutex_lock (&main_arena.mutex);
const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
(void) mutex_unlock (&main_arena.mutex);
if (!oldp)
{
malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
return malloc_check (bytes, NULL);
}
const INTERNAL_SIZE_T oldsize = chunksize(oldp);
const INTERNAL_SIZE_T oldsize = chunksize (oldp);
checked_request2size(bytes+1, nb);
(void)mutex_lock(&main_arena.mutex);
checked_request2size (bytes + 1, nb);
(void) mutex_lock (&main_arena.mutex);
if (chunk_is_mmapped(oldp)) {
if (chunk_is_mmapped (oldp))
{
#if HAVE_MREMAP
mchunkptr newp = mremap_chunk(oldp, nb);
if(newp)
newmem = chunk2mem(newp);
mchunkptr newp = mremap_chunk (oldp, nb);
if (newp)
newmem = chunk2mem (newp);
else
#endif
{
/* Note the extra SIZE_SZ overhead. */
if(oldsize - SIZE_SZ >= nb)
if (oldsize - SIZE_SZ >= nb)
newmem = oldmem; /* do nothing */
else {
else
{
/* Must alloc, copy, free. */
if (top_check() >= 0)
newmem = _int_malloc(&main_arena, bytes+1);
if (newmem) {
memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
munmap_chunk(oldp);
if (top_check () >= 0)
newmem = _int_malloc (&main_arena, bytes + 1);
if (newmem)
{
memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
munmap_chunk (oldp);
}
}
}
} else {
if (top_check() >= 0) {
}
else
{
if (top_check () >= 0)
{
INTERNAL_SIZE_T nb;
checked_request2size(bytes + 1, nb);
newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
checked_request2size (bytes + 1, nb);
newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
}
}
/* mem2chunk_check changed the magic byte in the old chunk.
If newmem is NULL, then the old chunk will still be used though,
so we need to invert that change here. */
if (newmem == NULL) *magic_p ^= 0xFF;
if (newmem == NULL)
*magic_p ^= 0xFF;
(void)mutex_unlock(&main_arena.mutex);
(void) mutex_unlock (&main_arena.mutex);
return mem2mem_check(newmem, bytes);
return mem2mem_check (newmem, bytes);
}
static void*
memalign_check(size_t alignment, size_t bytes, const void *caller)
static void *
memalign_check (size_t alignment, size_t bytes, const void *caller)
{
void* mem;
void *mem;
if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
if (alignment < MINSIZE) alignment = MINSIZE;
if (alignment <= MALLOC_ALIGNMENT)
return malloc_check (bytes, NULL);
if (alignment < MINSIZE)
alignment = MINSIZE;
/* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
power of 2 and will cause overflow in the check below. */
@@ -377,17 +414,19 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
}
/* Make sure alignment is power of 2. */
if (!powerof2(alignment)) {
if (!powerof2 (alignment))
{
size_t a = MALLOC_ALIGNMENT * 2;
while (a < alignment) a <<= 1;
while (a < alignment)
a <<= 1;
alignment = a;
}
(void)mutex_lock(&main_arena.mutex);
mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
(void) mutex_lock (&main_arena.mutex);
mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
NULL;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(mem, bytes);
(void) mutex_unlock (&main_arena.mutex);
return mem2mem_check (mem, bytes);
}
@@ -408,13 +447,14 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
then the hooks are reset to 0. */
#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */
struct malloc_save_state {
struct malloc_save_state
{
long magic;
long version;
mbinptr av[NBINS * 2 + 2];
char* sbrk_base;
char *sbrk_base;
int sbrked_mem_bytes;
unsigned long trim_threshold;
unsigned long top_pad;
@@ -434,31 +474,34 @@ struct malloc_save_state {
unsigned long narenas;
};
void*
__malloc_get_state(void)
void *
__malloc_get_state (void)
{
struct malloc_save_state* ms;
struct malloc_save_state *ms;
int i;
mbinptr b;
ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
ms = (struct malloc_save_state *) __libc_malloc (sizeof (*ms));
if (!ms)
return 0;
(void)mutex_lock(&main_arena.mutex);
malloc_consolidate(&main_arena);
(void) mutex_lock (&main_arena.mutex);
malloc_consolidate (&main_arena);
ms->magic = MALLOC_STATE_MAGIC;
ms->version = MALLOC_STATE_VERSION;
ms->av[0] = 0;
ms->av[1] = 0; /* used to be binblocks, now no longer used */
ms->av[2] = top(&main_arena);
ms->av[2] = top (&main_arena);
ms->av[3] = 0; /* used to be undefined */
for(i=1; i<NBINS; i++) {
b = bin_at(&main_arena, i);
if(first(b) == b)
ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
else {
ms->av[2*i+2] = first(b);
ms->av[2*i+3] = last(b);
for (i = 1; i < NBINS; i++)
{
b = bin_at (&main_arena, i);
if (first (b) == b)
ms->av[2 * i + 2] = ms->av[2 * i + 3] = 0; /* empty bin */
else
{
ms->av[2 * i + 2] = first (b);
ms->av[2 * i + 3] = last (b);
}
}
ms->sbrk_base = mp_.sbrk_base;
@@ -475,72 +518,86 @@ __malloc_get_state(void)
ms->mmapped_mem = mp_.mmapped_mem;
ms->max_mmapped_mem = mp_.max_mmapped_mem;
ms->using_malloc_checking = using_malloc_checking;
ms->max_fast = get_max_fast();
ms->max_fast = get_max_fast ();
ms->arena_test = mp_.arena_test;
ms->arena_max = mp_.arena_max;
ms->narenas = narenas;
(void)mutex_unlock(&main_arena.mutex);
return (void*)ms;
(void) mutex_unlock (&main_arena.mutex);
return (void *) ms;
}
int
__malloc_set_state(void* msptr)
__malloc_set_state (void *msptr)
{
struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
size_t i;
mbinptr b;
disallow_malloc_check = 1;
ptmalloc_init();
if(ms->magic != MALLOC_STATE_MAGIC) return -1;
ptmalloc_init ();
if (ms->magic != MALLOC_STATE_MAGIC)
return -1;
/* Must fail if the major version is too high. */
if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
(void)mutex_lock(&main_arena.mutex);
if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
return -2;
(void) mutex_lock (&main_arena.mutex);
/* There are no fastchunks. */
clear_fastchunks(&main_arena);
clear_fastchunks (&main_arena);
if (ms->version >= 4)
set_max_fast(ms->max_fast);
set_max_fast (ms->max_fast);
else
set_max_fast(64); /* 64 used to be the value we always used. */
for (i=0; i<NFASTBINS; ++i)
set_max_fast (64); /* 64 used to be the value we always used. */
for (i = 0; i < NFASTBINS; ++i)
fastbin (&main_arena, i) = 0;
for (i=0; i<BINMAPSIZE; ++i)
for (i = 0; i < BINMAPSIZE; ++i)
main_arena.binmap[i] = 0;
top(&main_arena) = ms->av[2];
top (&main_arena) = ms->av[2];
main_arena.last_remainder = 0;
for(i=1; i<NBINS; i++) {
b = bin_at(&main_arena, i);
if(ms->av[2*i+2] == 0) {
assert(ms->av[2*i+3] == 0);
first(b) = last(b) = b;
} else {
if(ms->version >= 3 &&
(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
largebin_index(chunksize(ms->av[2*i+3]))==i))) {
first(b) = ms->av[2*i+2];
last(b) = ms->av[2*i+3];
for (i = 1; i < NBINS; i++)
{
b = bin_at (&main_arena, i);
if (ms->av[2 * i + 2] == 0)
{
assert (ms->av[2 * i + 3] == 0);
first (b) = last (b) = b;
}
else
{
if (ms->version >= 3 &&
(i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
{
first (b) = ms->av[2 * i + 2];
last (b) = ms->av[2 * i + 3];
/* Make sure the links to the bins within the heap are correct. */
first(b)->bk = b;
last(b)->fd = b;
first (b)->bk = b;
last (b)->fd = b;
/* Set bit in binblocks. */
mark_bin(&main_arena, i);
} else {
mark_bin (&main_arena, i);
}
else
{
/* Oops, index computation from chunksize must have changed.
Link the whole list into unsorted_chunks. */
first(b) = last(b) = b;
b = unsorted_chunks(&main_arena);
ms->av[2*i+2]->bk = b;
ms->av[2*i+3]->fd = b->fd;
b->fd->bk = ms->av[2*i+3];
b->fd = ms->av[2*i+2];
first (b) = last (b) = b;
b = unsorted_chunks (&main_arena);
ms->av[2 * i + 2]->bk = b;
ms->av[2 * i + 3]->fd = b->fd;
b->fd->bk = ms->av[2 * i + 3];
b->fd = ms->av[2 * i + 2];
}
}
}
if (ms->version < 3) {
if (ms->version < 3)
{
/* Clear fd_nextsize and bk_nextsize fields. */
b = unsorted_chunks(&main_arena)->fd;
while (b != unsorted_chunks(&main_arena)) {
if (!in_smallbin_range(chunksize(b))) {
b = unsorted_chunks (&main_arena)->fd;
while (b != unsorted_chunks (&main_arena))
{
if (!in_smallbin_range (chunksize (b)))
{
b->fd_nextsize = NULL;
b->bk_nextsize = NULL;
}
@@ -560,13 +617,15 @@ __malloc_set_state(void* msptr)
mp_.mmapped_mem = ms->mmapped_mem;
mp_.max_mmapped_mem = ms->max_mmapped_mem;
/* add version-dependent code here */
if (ms->version >= 1) {
if (ms->version >= 1)
{
/* Check whether it is safe to enable malloc checking, or whether
it is necessary to disable it. */
if (ms->using_malloc_checking && !using_malloc_checking &&
!disallow_malloc_check)
__malloc_check_init ();
else if (!ms->using_malloc_checking && using_malloc_checking) {
else if (!ms->using_malloc_checking && using_malloc_checking)
{
__malloc_hook = NULL;
__free_hook = NULL;
__realloc_hook = NULL;
@@ -574,14 +633,15 @@ __malloc_set_state(void* msptr)
using_malloc_checking = 0;
}
}
if (ms->version >= 4) {
if (ms->version >= 4)
{
mp_.arena_test = ms->arena_test;
mp_.arena_max = ms->arena_max;
narenas = ms->narenas;
}
check_malloc_state(&main_arena);
check_malloc_state (&main_arena);
(void)mutex_unlock(&main_arena.mutex);
(void) mutex_unlock (&main_arena.mutex);
return 0;
}

malloc/malloc.c: diff suppressed because it is too large

malloc/malloc.h

@@ -39,7 +39,7 @@ extern void *malloc (size_t __size) __THROW __attribute_malloc__ __wur;
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
extern void *calloc (size_t __nmemb, size_t __size)
__THROW __attribute_malloc__ __wur;
__THROW __attribute_malloc__ __wur;
/* Re-allocate the previously allocated block in __ptr, making the new
block SIZE bytes long. */
@@ -47,7 +47,7 @@ extern void *calloc (size_t __nmemb, size_t __size)
the same pointer that was passed to it, aliasing needs to be allowed
between objects pointed by the old and new pointers. */
extern void *realloc (void *__ptr, size_t __size)
__THROW __attribute_warn_unused_result__;
__THROW __attribute_warn_unused_result__;
/* Free a block allocated by `malloc', `realloc' or `calloc'. */
extern void free (void *__ptr) __THROW;
@@ -57,14 +57,14 @@ extern void cfree (void *__ptr) __THROW;
/* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
extern void *memalign (size_t __alignment, size_t __size)
__THROW __attribute_malloc__ __wur;
__THROW __attribute_malloc__ __wur;
/* Allocate SIZE bytes on a page boundary. */
extern void *valloc (size_t __size) __THROW __attribute_malloc__ __wur;
/* Equivalent to valloc(minimum-page-that-holds(n)), that is, round up
__size to nearest pagesize. */
extern void * pvalloc (size_t __size) __THROW __attribute_malloc__ __wur;
extern void *pvalloc (size_t __size) __THROW __attribute_malloc__ __wur;
/* Underlying allocation function; successive calls should return
contiguous pieces of memory. */
@@ -72,7 +72,7 @@ extern void *(*__morecore) (ptrdiff_t __size);
/* Default value of `__morecore'. */
extern void *__default_morecore (ptrdiff_t __size)
__THROW __attribute_malloc__;
__THROW __attribute_malloc__;
/* SVID2/XPG mallinfo structure */
@@ -145,22 +145,22 @@ extern int malloc_set_state (void *__ptr) __THROW;
the application provides the preferred way to set up the hook
pointers. */
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void)
__MALLOC_DEPRECATED;
__MALLOC_DEPRECATED;
/* Hooks for debugging and user-defined versions. */
extern void (*__MALLOC_HOOK_VOLATILE __free_hook) (void *__ptr,
const void *)
__MALLOC_DEPRECATED;
extern void *(*__MALLOC_HOOK_VOLATILE __malloc_hook) (size_t __size,
__MALLOC_DEPRECATED;
extern void *(*__MALLOC_HOOK_VOLATILE __malloc_hook)(size_t __size,
const void *)
__MALLOC_DEPRECATED;
extern void *(*__MALLOC_HOOK_VOLATILE __realloc_hook) (void *__ptr,
__MALLOC_DEPRECATED;
extern void *(*__MALLOC_HOOK_VOLATILE __realloc_hook)(void *__ptr,
size_t __size,
const void *)
__MALLOC_DEPRECATED;
extern void *(*__MALLOC_HOOK_VOLATILE __memalign_hook) (size_t __alignment,
__MALLOC_DEPRECATED;
extern void *(*__MALLOC_HOOK_VOLATILE __memalign_hook)(size_t __alignment,
size_t __size,
const void *)
__MALLOC_DEPRECATED;
__MALLOC_DEPRECATED;
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
/* Activate a standard set of debugging hooks. */
@@ -168,5 +168,4 @@ extern void __malloc_check_init (void) __THROW __MALLOC_DEPRECATED;
__END_DECLS
#endif /* malloc.h */
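The hook variables declared in this header follow the protocol documented in the glibc manual: a hook uninstalls itself before calling the allocator so the inner call does not recurse, then re-arms itself on the way out. A condensed version of the manual's example, shown only to illustrate the declarations above (the interface is deprecated, as the __MALLOC_DEPRECATED markers say):

#include <malloc.h>
#include <stdio.h>

static void *(*old_malloc_hook) (size_t, const void *);

static void *
my_malloc_hook (size_t size, const void *caller)
{
  void *result;
  __malloc_hook = old_malloc_hook;  /* uninstall to avoid recursion */
  result = malloc (size);
  fprintf (stderr, "malloc (%zu) called from %p returns %p\n",
           size, caller, result);
  old_malloc_hook = __malloc_hook;  /* re-arm */
  __malloc_hook = my_malloc_hook;
  return result;
}

static void
install_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}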

malloc/mcheck.c

@@ -28,7 +28,7 @@
#endif
/* Old hook values. */
static void (*old_free_hook) (__ptr_t ptr, const __ptr_t);
static void (*old_free_hook)(__ptr_t ptr, const __ptr_t);
static __ptr_t (*old_malloc_hook) (size_t size, const __ptr_t);
static __ptr_t (*old_memalign_hook) (size_t alignment, size_t size,
const __ptr_t);
@@ -46,14 +46,14 @@ static void (*abortfunc) (enum mcheck_status);
#define FREEFLOOD ((char) 0x95)
struct hdr
{
{
size_t size; /* Exact size requested by user. */
unsigned long int magic; /* Magic number to check header integrity. */
struct hdr *prev;
struct hdr *next;
__ptr_t block; /* Real block allocated, for memalign. */
unsigned long int magic2; /* Extra, keeps us doubleword aligned. */
};
};
/* This is the beginning of the list of all memory blocks allocated.
It is only constructed if the pedantic testing is requested. */
@@ -69,11 +69,10 @@ static int pedantic;
# define flood memset
#else
static void flood (__ptr_t, int, size_t);
static void
flood (ptr, val, size)
__ptr_t ptr;
int val;
size_t size;
static void flood (ptr, val, size)
__ptr_t ptr;
int val;
size_t size;
{
char *cp = ptr;
while (size--)
@@ -194,7 +193,7 @@ freehook (__ptr_t ptr, const __ptr_t caller)
}
__free_hook = old_free_hook;
if (old_free_hook != NULL)
(*old_free_hook) (ptr, caller);
(*old_free_hook)(ptr, caller);
else
free (ptr);
__free_hook = freehook;
@@ -216,7 +215,7 @@ mallochook (size_t size, const __ptr_t caller)
__malloc_hook = old_malloc_hook;
if (old_malloc_hook != NULL)
hdr = (struct hdr *) (*old_malloc_hook) (sizeof (struct hdr) + size + 1,
hdr = (struct hdr *) (*old_malloc_hook)(sizeof (struct hdr) + size + 1,
caller);
else
hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
@@ -244,7 +243,7 @@ memalignhook (size_t alignment, size_t size,
if (pedantic)
mcheck_check_all ();
slop = (sizeof *hdr + alignment - 1) & -alignment;
slop = (sizeof *hdr + alignment - 1) & - alignment;
if (size > ~((size_t) 0) - (slop + 1))
{
@@ -254,7 +253,7 @@
__memalign_hook = old_memalign_hook;
if (old_memalign_hook != NULL)
block = (*old_memalign_hook) (alignment, slop + size + 1, caller);
block = (*old_memalign_hook)(alignment, slop + size + 1, caller);
else
block = memalign (alignment, slop + size + 1);
__memalign_hook = memalignhook;
@@ -313,7 +312,7 @@ reallochook (__ptr_t ptr, size_t size, const __ptr_t caller)
__memalign_hook = old_memalign_hook;
__realloc_hook = old_realloc_hook;
if (old_realloc_hook != NULL)
hdr = (struct hdr *) (*old_realloc_hook) ((__ptr_t) hdr,
hdr = (struct hdr *) (*old_realloc_hook)((__ptr_t) hdr,
sizeof (struct hdr) + size + 1,
caller);
else
@@ -344,19 +343,19 @@ mabort (enum mcheck_status status)
switch (status)
{
case MCHECK_OK:
msg = _("memory is consistent, library is buggy\n");
msg = _ ("memory is consistent, library is buggy\n");
break;
case MCHECK_HEAD:
msg = _("memory clobbered before allocated block\n");
msg = _ ("memory clobbered before allocated block\n");
break;
case MCHECK_TAIL:
msg = _("memory clobbered past end of allocated block\n");
msg = _ ("memory clobbered past end of allocated block\n");
break;
case MCHECK_FREE:
msg = _("block freed twice\n");
msg = _ ("block freed twice\n");
break;
default:
msg = _("bogus mcheck_status, library is buggy\n");
msg = _ ("bogus mcheck_status, library is buggy\n");
break;
}
#ifdef _LIBC
@@ -370,11 +369,10 @@ mabort (enum mcheck_status status)
/* Memory barrier so that GCC does not optimize out the argument. */
#define malloc_opt_barrier(x) \
({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
int
mcheck (func)
void (*func) (enum mcheck_status);
int mcheck (func)
void (*func)(enum mcheck_status);
{
abortfunc = (func != NULL) ? func : &mabort;
@@ -404,9 +402,8 @@ mcheck (func)
libc_hidden_def (mcheck)
#endif
int
mcheck_pedantic (func)
void (*func) (enum mcheck_status);
int mcheck_pedantic (func)
void (*func)(enum mcheck_status);
{
int res = mcheck (func);
if (res == 0)

malloc/mcheck.h

@@ -25,24 +25,24 @@ __BEGIN_DECLS
/* Return values for `mprobe': these are the kinds of inconsistencies that
`mcheck' enables detection of. */
enum mcheck_status
{
{
MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */
MCHECK_OK, /* Block is fine. */
MCHECK_FREE, /* Block freed twice. */
MCHECK_HEAD, /* Memory before the block was clobbered. */
MCHECK_TAIL /* Memory after the block was clobbered. */
};
};
/* Activate a standard collection of debugging hooks. This must be called
before `malloc' is ever called. ABORTFUNC is called with an error code
(see enum above) when an inconsistency is detected. If ABORTFUNC is
null, the standard function prints on stderr and then calls `abort'. */
extern int mcheck (void (*__abortfunc) (enum mcheck_status)) __THROW;
extern int mcheck (void (*__abortfunc)(enum mcheck_status)) __THROW;
/* Similar to `mcheck' but performs checks for all block whenever one of
the memory handling functions is called. This can be very slow. */
extern int mcheck_pedantic (void (*__abortfunc) (enum mcheck_status)) __THROW;
extern int mcheck_pedantic (void (*__abortfunc)(enum mcheck_status)) __THROW;
/* Force check of all blocks now. */
extern void mcheck_check_all (void);
@@ -57,5 +57,4 @@ extern void mtrace (void) __THROW;
extern void muntrace (void) __THROW;
__END_DECLS
#endif /* mcheck.h */
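A short usage sketch for the interface declared above: mcheck must be called before the first allocation (or the program linked with -lmcheck), after which mprobe can classify a block using the enum values from this header. Illustrative only:

#include <mcheck.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  if (mcheck (NULL) != 0)   /* NULL: report to stderr, then abort */
    return 1;

  char *p = malloc (16);
  /* Writing p[16] here would make mprobe/free report MCHECK_TAIL. */
  enum mcheck_status s = mprobe (p);
  printf ("status: %d (MCHECK_OK is %d)\n", (int) s, (int) MCHECK_OK);
  free (p);
  return 0;
}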

malloc/memusage.c

@@ -38,7 +38,7 @@
/* Pointer to the real functions. These are determined used `dlsym'
when really needed. */
static void *(*mallocp) (size_t);
static void *(*mallocp)(size_t);
static void *(*reallocp) (void *, size_t);
static void *(*callocp) (size_t, size_t);
static void (*freep) (void *);
@@ -221,19 +221,19 @@ me (void)
size_t prog_len = strlen (__progname);
initialized = -1;
mallocp = (void *(*) (size_t)) dlsym (RTLD_NEXT, "malloc");
reallocp = (void *(*) (void *, size_t)) dlsym (RTLD_NEXT, "realloc");
callocp = (void *(*) (size_t, size_t)) dlsym (RTLD_NEXT, "calloc");
freep = (void (*) (void *)) dlsym (RTLD_NEXT, "free");
mallocp = (void *(*)(size_t))dlsym (RTLD_NEXT, "malloc");
reallocp = (void *(*)(void *, size_t))dlsym (RTLD_NEXT, "realloc");
callocp = (void *(*)(size_t, size_t))dlsym (RTLD_NEXT, "calloc");
freep = (void (*)(void *))dlsym (RTLD_NEXT, "free");
mmapp = (void *(*) (void *, size_t, int, int, int, off_t)) dlsym (RTLD_NEXT,
mmapp = (void *(*)(void *, size_t, int, int, int, off_t))dlsym (RTLD_NEXT,
"mmap");
mmap64p =
(void *(*) (void *, size_t, int, int, int, off64_t)) dlsym (RTLD_NEXT,
(void *(*)(void *, size_t, int, int, int, off64_t))dlsym (RTLD_NEXT,
"mmap64");
mremapp = (void *(*) (void *, size_t, size_t, int, void *)) dlsym (RTLD_NEXT,
mremapp = (void *(*)(void *, size_t, size_t, int, void *))dlsym (RTLD_NEXT,
"mremap");
munmapp = (int (*) (void *, size_t)) dlsym (RTLD_NEXT, "munmap");
munmapp = (int (*)(void *, size_t))dlsym (RTLD_NEXT, "munmap");
initialized = 1;
if (env != NULL)
@@ -317,7 +317,7 @@ __attribute__ ((constructor))
init (void)
{
start_sp = GETSP ();
if (! initialized)
if (!initialized)
me ();
}
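memusage.c is built to be injected with LD_PRELOAD: me() resolves the next definition of each symbol via dlsym (RTLD_NEXT, ...), and every wrapper counts the call before forwarding to the real implementation. A minimal interposer on the same principle (compile with gcc -shared -fPIC; it omits the re-entrancy and not_me guards that the real code implements with its initialized flag):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stddef.h>
#include <stdio.h>

static void *(*real_malloc) (size_t);
static unsigned long malloc_calls;

void *
malloc (size_t len)
{
  if (real_malloc == NULL)
    real_malloc = (void *(*) (size_t)) dlsym (RTLD_NEXT, "malloc");

  ++malloc_calls;               /* keep track of the number of calls */
  return (*real_malloc) (len);
}

static void __attribute__ ((destructor))
report (void)
{
  fprintf (stderr, "malloc calls: %lu\n", malloc_calls);
}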
@@ -334,12 +334,13 @@ malloc (size_t len)
{
if (initialized == -1)
return NULL;
me ();
}
/* If this is not the correct program just use the normal function. */
if (not_me)
return (*mallocp) (len);
return (*mallocp)(len);
/* Keep track of number of calls. */
catomic_increment (&calls[idx_malloc]);
@@ -356,7 +357,7 @@ malloc (size_t len)
catomic_increment (&calls_total);
/* Do the real work. */
result = (struct header *) (*mallocp) (len + sizeof (struct header));
result = (struct header *) (*mallocp)(len + sizeof (struct header));
if (result == NULL)
{
catomic_increment (&failed[idx_malloc]);
@ -385,12 +386,13 @@ realloc (void *old, size_t len)
{
if (initialized == -1)
return NULL;
me ();
}
/* If this is not the correct program just use the normal function. */
if (not_me)
return (*reallocp) (old, len);
return (*reallocp)(old, len);
if (old == NULL)
{
@ -403,7 +405,8 @@ realloc (void *old, size_t len)
real = ((struct header *) old) - 1;
if (real->magic != MAGIC)
/* This is no memory allocated here. */
return (*reallocp) (old, len);
return (*reallocp)(old, len);
old_len = real->length;
}
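
The magic test in this hunk works because every block the wrapper hands out is prefixed with a small bookkeeping header recording its length and a magic tag, so realloc and free can recognize blocks the wrapper itself allocated. A hedged reconstruction of that scheme (field names follow the diff; the MAGIC value and the wrapped_* function names are illustrative):

#include <stddef.h>
#include <stdlib.h>

struct header
{
  size_t length;
  size_t magic;
};

#define MAGIC 0xfeedbeaf        /* Illustrative tag value.  */

static void *
wrapped_malloc (size_t len)
{
  struct header *result = malloc (len + sizeof (struct header));
  if (result == NULL)
    return NULL;
  result->length = len;
  result->magic = MAGIC;
  return result + 1;            /* User data starts after the header.  */
}

static void
wrapped_free (void *ptr)
{
  if (ptr == NULL)
    return;
  struct header *real = ((struct header *) ptr) - 1;
  if (real->magic != MAGIC)
    free (ptr);                 /* Not one of ours; pass through.  */
  else
    free (real);
}
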
@ -442,7 +445,7 @@ realloc (void *old, size_t len)
catomic_increment (&calls_total);
/* Do the real work. */
result = (struct header *) (*reallocp) (real, len + sizeof (struct header));
result = (struct header *) (*reallocp)(real, len + sizeof (struct header));
if (result == NULL)
{
catomic_increment (&failed[idx_realloc]);
@ -477,12 +480,13 @@ calloc (size_t n, size_t len)
{
if (initialized == -1)
return NULL;
me ();
}
/* If this is not the correct program just use the normal function. */
if (not_me)
return (*callocp) (n, len);
return (*callocp)(n, len);
/* Keep track of number of calls. */
catomic_increment (&calls[idx_calloc]);
@ -499,7 +503,7 @@ calloc (size_t n, size_t len)
++calls_total;
/* Do the real work. */
result = (struct header *) (*mallocp) (size + sizeof (struct header));
result = (struct header *) (*mallocp)(size + sizeof (struct header));
if (result == NULL)
{
catomic_increment (&failed[idx_calloc]);
@ -526,6 +530,7 @@ free (void *ptr)
{
if (initialized == -1)
return;
me ();
}
@ -577,11 +582,12 @@ mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
{
if (initialized == -1)
return NULL;
me ();
}
/* Always get a block. We don't need extra memory. */
result = (*mmapp) (start, len, prot, flags, fd, offset);
result = (*mmapp)(start, len, prot, flags, fd, offset);
if (!not_me && trace_mmap)
{
@ -629,11 +635,12 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
{
if (initialized == -1)
return NULL;
me ();
}
/* Always get a block. We don't need extra memory. */
result = (*mmap64p) (start, len, prot, flags, fd, offset);
result = (*mmap64p)(start, len, prot, flags, fd, offset);
if (!not_me && trace_mmap)
{
@ -686,11 +693,12 @@ mremap (void *start, size_t old_len, size_t len, int flags, ...)
{
if (initialized == -1)
return NULL;
me ();
}
/* Always get a block. We don't need extra memory. */
result = (*mremapp) (start, old_len, len, flags, newaddr);
result = (*mremapp)(start, old_len, len, flags, newaddr);
if (!not_me && trace_mmap)
{
@ -746,11 +754,12 @@ munmap (void *start, size_t len)
{
if (initialized == -1)
return -1;
me ();
}
/* Do the real work. */
result = (*munmapp) (start, len);
result = (*munmapp)(start, len);
if (!not_me && trace_mmap)
{
@ -785,6 +794,7 @@ dest (void)
/* If we haven't done anything here just return. */
if (not_me)
return;
/* If we should call any of the memory functions don't do any profiling. */
not_me = true;

malloc/memusagestat.c View File

@ -53,24 +53,24 @@
/* Definitions of arguments for argp functions. */
static const struct argp_option options[] =
{
{ "output", 'o', N_("FILE"), 0, N_("Name output file") },
{ "string", 's', N_("STRING"), 0, N_("Title string used in output graphic") },
{ "time", 't', NULL, 0, N_("\
{ "output", 'o', N_ ("FILE"), 0, N_ ("Name output file") },
{ "string", 's', N_ ("STRING"), 0, N_ ("Title string used in output graphic") },
{ "time", 't', NULL, 0, N_ (" \
Generate output linear to time (default is linear to number of function calls)\
") },
{ "total", 'T', NULL, 0,
N_("Also draw graph for total memory consumption") },
{ "x-size", 'x', N_("VALUE"), 0,
N_("Make output graphic VALUE pixels wide") },
{ "y-size", 'y', "VALUE", 0, N_("Make output graphic VALUE pixels high") },
N_ ("Also draw graph for total memory consumption") },
{ "x-size", 'x', N_ ("VALUE"), 0,
N_ ("Make output graphic VALUE pixels wide") },
{ "y-size", 'y', "VALUE", 0, N_ ("Make output graphic VALUE pixels high") },
{ NULL, 0, NULL, 0, NULL }
};
/* Short description of program. */
static const char doc[] = N_("Generate graphic from memory profiling data");
static const char doc[] = N_ ("Generate graphic from memory profiling data");
/* Strings for arguments in help texts. */
static const char args_doc[] = N_("DATAFILE [OUTFILE]");
static const char args_doc[] = N_ ("DATAFILE [OUTFILE]");
/* Prototype for option handler. */
static error_t parse_opt (int key, char *arg, struct argp_state *state);
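
The N_ spacing churn above happens inside an argp option table. For orientation, the minimal shape of an argp-based parser looks like this; an illustrative sketch, with the option, strings, and callback simplified relative to the N_()-wrapped originals:

#include <argp.h>
#include <stdio.h>

static const struct argp_option options[] =
{
  { "output", 'o', "FILE", 0, "Name output file" },
  { NULL, 0, NULL, 0, NULL }
};

static error_t
parse_opt (int key, char *arg, struct argp_state *state)
{
  switch (key)
    {
    case 'o':
      printf ("output file: %s\n", arg);
      return 0;
    default:
      /* Let argp handle everything we do not recognize.  */
      return ARGP_ERR_UNKNOWN;
    }
}

static struct argp argp = { options, parse_opt, "DATAFILE", "Demo parser" };

int
main (int argc, char *argv[])
{
  return argp_parse (&argp, argc, argv, 0, NULL, NULL);
}
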
@ -439,7 +439,7 @@ main (int argc, char *argv[])
gdImageString (im_out, gdFontSmall, 40 + (xsize - 39 * 6 - 80) / 2,
ysize - 12,
(unsigned char *) "\
(unsigned char *) " \
# memory handling function calls / time", blue);
for (cnt = 0; cnt < 20; cnt += 2)
@ -564,7 +564,9 @@ more_help (int key, const char *text, void *input)
For bug reporting instructions, please see:\n\
%s.\n"), REPORT_BUGS_TO) < 0)
return NULL;
return tp;
default:
break;
}

malloc/morecore.c View File

@ -16,26 +16,26 @@
<http://www.gnu.org/licenses/>. */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
# define _MALLOC_INTERNAL
# include <malloc.h>
#endif
#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
# define __sbrk sbrk
#endif
#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
systems with potentially hostile include files. */
#include <stddef.h>
#include <stdlib.h>
# include <stddef.h>
# include <stdlib.h>
extern void *__sbrk (ptrdiff_t increment) __THROW;
libc_hidden_proto (__sbrk)
#endif
#ifndef NULL
#define NULL 0
# define NULL 0
#endif
/* Allocate INCREMENT more bytes of data space,
@ -47,6 +47,7 @@ __default_morecore (ptrdiff_t increment)
void *result = (void *) __sbrk (increment);
if (result == (void *) -1)
return NULL;
return result;
}
libc_hidden_def (__default_morecore)
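
__default_morecore is the function glibc's __morecore hook points at by default; a replacement must keep the same contract (return the start of INCREMENT new bytes of data space, or NULL on failure). A hedged sketch of a drop-in that merely logs each request before delegating to sbrk (the name logging_morecore is hypothetical):

/* _DEFAULT_SOURCE makes sbrk visible on modern glibc.  */
#define _DEFAULT_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

void *
logging_morecore (ptrdiff_t increment)
{
  void *result = sbrk (increment);
  if (result == (void *) -1)
    return NULL;

  fprintf (stderr, "morecore: %td bytes at %p\n", increment, result);
  return result;
}
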

malloc/mtrace.c View File

@ -19,10 +19,10 @@
<http://www.gnu.org/licenses/>. */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <mcheck.h>
#include <bits/libc-lock.h>
# define _MALLOC_INTERNAL
# include <malloc.h>
# include <mcheck.h>
# include <bits/libc-lock.h>
#endif
#include <dlfcn.h>
@ -48,7 +48,7 @@
#define TRACE_BUFFER_SIZE 512
static FILE *mallstream;
static const char mallenv[]= "MALLOC_TRACE";
static const char mallenv[] = "MALLOC_TRACE";
static char *malloc_trace_buffer;
__libc_lock_define_initialized (static, lock);
@ -79,10 +79,9 @@ libc_hidden_def (tr_break)
static void tr_where (const __ptr_t, Dl_info *) __THROW internal_function;
static void
internal_function
tr_where (caller, info)
const __ptr_t caller;
Dl_info *info;
internal_function tr_where (caller, info)
const __ptr_t caller;
Dl_info *info;
{
if (caller != NULL)
{
@ -107,7 +106,7 @@ tr_where (caller, info)
}
fprintf (mallstream, "@ %s%s%s[%p] ",
info->dli_fname ?: "", info->dli_fname ? ":" : "",
info->dli_fname ? : "", info->dli_fname ? ":" : "",
buf, caller);
}
else
@ -131,10 +130,9 @@ lock_and_info (const __ptr_t caller, Dl_info *mem)
static void tr_freehook (__ptr_t, const __ptr_t) __THROW;
static void
tr_freehook (ptr, caller)
__ptr_t ptr;
const __ptr_t caller;
static void tr_freehook (ptr, caller)
__ptr_t ptr;
const __ptr_t caller;
{
if (ptr == NULL)
return;
@ -152,7 +150,7 @@ tr_freehook (ptr, caller)
}
__free_hook = tr_old_free_hook;
if (tr_old_free_hook != NULL)
(*tr_old_free_hook) (ptr, caller);
(*tr_old_free_hook)(ptr, caller);
else
free (ptr);
__free_hook = tr_freehook;
@ -160,10 +158,9 @@ tr_freehook (ptr, caller)
}
static __ptr_t tr_mallochook (size_t, const __ptr_t) __THROW;
static __ptr_t
tr_mallochook (size, caller)
size_t size;
const __ptr_t caller;
static __ptr_t tr_mallochook (size, caller)
size_t size;
const __ptr_t caller;
{
__ptr_t hdr;
@ -172,7 +169,7 @@ tr_mallochook (size, caller)
__malloc_hook = tr_old_malloc_hook;
if (tr_old_malloc_hook != NULL)
hdr = (__ptr_t) (*tr_old_malloc_hook) (size, caller);
hdr = (__ptr_t) (*tr_old_malloc_hook)(size, caller);
else
hdr = (__ptr_t) malloc (size);
__malloc_hook = tr_mallochook;
@ -190,12 +187,11 @@ tr_mallochook (size, caller)
}
static __ptr_t tr_reallochook (__ptr_t, size_t, const __ptr_t)
__THROW;
static __ptr_t
tr_reallochook (ptr, size, caller)
__ptr_t ptr;
size_t size;
const __ptr_t caller;
__THROW;
static __ptr_t tr_reallochook (ptr, size, caller)
__ptr_t ptr;
size_t size;
const __ptr_t caller;
{
__ptr_t hdr;
@ -209,7 +205,7 @@ tr_reallochook (ptr, size, caller)
__malloc_hook = tr_old_malloc_hook;
__realloc_hook = tr_old_realloc_hook;
if (tr_old_realloc_hook != NULL)
hdr = (__ptr_t) (*tr_old_realloc_hook) (ptr, size, caller);
hdr = (__ptr_t) (*tr_old_realloc_hook)(ptr, size, caller);
else
hdr = (__ptr_t) realloc (ptr, size);
__free_hook = tr_freehook;
@ -244,10 +240,9 @@ tr_reallochook (ptr, size, caller)
static __ptr_t tr_memalignhook (size_t, size_t,
const __ptr_t) __THROW;
static __ptr_t
tr_memalignhook (alignment, size, caller)
size_t alignment, size;
const __ptr_t caller;
static __ptr_t tr_memalignhook (alignment, size, caller)
size_t alignment, size;
const __ptr_t caller;
{
__ptr_t hdr;
@ -257,7 +252,7 @@ tr_memalignhook (alignment, size, caller)
__memalign_hook = tr_old_memalign_hook;
__malloc_hook = tr_old_malloc_hook;
if (tr_old_memalign_hook != NULL)
hdr = (__ptr_t) (*tr_old_memalign_hook) (alignment, size, caller);
hdr = (__ptr_t) (*tr_old_memalign_hook)(alignment, size, caller);
else
hdr = (__ptr_t) memalign (alignment, size);
__memalign_hook = tr_memalignhook;
@ -352,7 +347,7 @@ mtrace (void)
{
extern void *__dso_handle __attribute__ ((__weak__));
added_atexit_handler = 1;
__cxa_atexit ((void (*) (void *)) release_libc_mem, NULL,
__cxa_atexit ((void (*)(void *))release_libc_mem, NULL,
&__dso_handle ? __dso_handle : NULL);
}
#endif
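
For context, the tracing machinery reindented above is driven entirely by the MALLOC_TRACE environment variable; a typical session brackets the region of interest with mtrace ()/muntrace () and post-processes the log with the mtrace(1) script. A minimal sketch (the log path is illustrative):

/* Run as:  MALLOC_TRACE=/tmp/trace.log ./a.out
   then:    mtrace ./a.out /tmp/trace.log  */
#include <mcheck.h>
#include <stdlib.h>

int
main (void)
{
  mtrace ();                   /* Start logging if MALLOC_TRACE is set.  */

  void *p = malloc (32);
  free (p);
  void *leak = malloc (16);    /* Deliberately leaked; shows in the log.  */
  (void) leak;

  muntrace ();                 /* Stop logging.  */
  return 0;
}
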

malloc/obstack.c View File

@ -78,10 +78,10 @@ struct fooalign
But in fact it might be less smart and round addresses to as much as
DEFAULT_ROUNDING. So we prepare for it to do that. */
enum
{
{
DEFAULT_ALIGNMENT = offsetof (struct fooalign, u),
DEFAULT_ROUNDING = sizeof (union fooround)
};
};
/* When we copy a long block of data, this is the unit to do it with.
On some machines, copying successive ints does not work;
@ -127,19 +127,19 @@ compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
do not allow (expr) ? void : void. */
# define CALL_CHUNKFUN(h, size) \
(((h) -> use_extra_arg) \
? (*(h)->chunkfun) ((h)->extra_arg, (size)) \
: (*(struct _obstack_chunk *(*) (long)) (h)->chunkfun) ((size)))
(((h)->use_extra_arg) \
? (*(h)->chunkfun)((h)->extra_arg, (size)) \
: (*(struct _obstack_chunk *(*)(long))(h)->chunkfun)((size)))
# define CALL_FREEFUN(h, old_chunk) \
do { \
if ((h) -> use_extra_arg) \
(*(h)->freefun) ((h)->extra_arg, (old_chunk)); \
if ((h)->use_extra_arg) \
(*(h)->freefun)((h)->extra_arg, (old_chunk)); \
else \
(*(void (*) (void *)) (h)->freefun) ((old_chunk)); \
(*(void (*)(void *))(h)->freefun)((old_chunk)); \
} while (0)
/* Initialize an obstack H for use. Specify chunk size SIZE (0 means default).
Objects start on multiples of ALIGNMENT (0 means use default).
CHUNKFUN is the function to use to allocate chunks,
@ -151,8 +151,8 @@ compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0);
int
_obstack_begin (struct obstack *h,
int size, int alignment,
void *(*chunkfun) (long),
void (*freefun) (void *))
void *(*chunkfun)(long),
void (*freefun)(void *))
{
struct _obstack_chunk *chunk; /* points to new chunk */
@ -175,15 +175,15 @@ _obstack_begin (struct obstack *h,
size = 4096 - extra;
}
h->chunkfun = (struct _obstack_chunk * (*)(void *, long)) chunkfun;
h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
h->chunkfun = (struct _obstack_chunk * (*)(void *, long))chunkfun;
h->freefun = (void (*)(void *, struct _obstack_chunk *))freefun;
h->chunk_size = size;
h->alignment_mask = alignment - 1;
h->use_extra_arg = 0;
chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
chunk = h->chunk = CALL_CHUNKFUN (h, h->chunk_size);
if (!chunk)
(*obstack_alloc_failed_handler) ();
(*obstack_alloc_failed_handler)();
h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
alignment - 1);
h->chunk_limit = chunk->limit
@ -197,8 +197,8 @@ _obstack_begin (struct obstack *h,
int
_obstack_begin_1 (struct obstack *h, int size, int alignment,
void *(*chunkfun) (void *, long),
void (*freefun) (void *, void *),
void *(*chunkfun)(void *, long),
void (*freefun)(void *, void *),
void *arg)
{
struct _obstack_chunk *chunk; /* points to new chunk */
@ -222,16 +222,16 @@ _obstack_begin_1 (struct obstack *h, int size, int alignment,
size = 4096 - extra;
}
h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun;
h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
h->chunkfun = (struct _obstack_chunk * (*)(void *, long))chunkfun;
h->freefun = (void (*)(void *, struct _obstack_chunk *))freefun;
h->chunk_size = size;
h->alignment_mask = alignment - 1;
h->extra_arg = arg;
h->use_extra_arg = 1;
chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
chunk = h->chunk = CALL_CHUNKFUN (h, h->chunk_size);
if (!chunk)
(*obstack_alloc_failed_handler) ();
(*obstack_alloc_failed_handler)();
h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
alignment - 1);
h->chunk_limit = chunk->limit
@ -268,7 +268,7 @@ _obstack_newchunk (struct obstack *h, int length)
/* Allocate and initialize the new chunk. */
new_chunk = CALL_CHUNKFUN (h, new_size);
if (!new_chunk)
(*obstack_alloc_failed_handler) ();
(*obstack_alloc_failed_handler)();
h->chunk = new_chunk;
new_chunk->prev = old_chunk;
new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size;
@ -284,8 +284,8 @@ _obstack_newchunk (struct obstack *h, int length)
{
for (i = obj_size / sizeof (COPYING_UNIT) - 1;
i >= 0; i--)
((COPYING_UNIT *)object_base)[i]
= ((COPYING_UNIT *)h->object_base)[i];
((COPYING_UNIT *) object_base)[i]
= ((COPYING_UNIT *) h->object_base)[i];
/* We used to copy the odd few remaining bytes as one extra COPYING_UNIT,
but that can cross a page boundary on a machine
which does not do strict alignment for COPYING_UNITS. */
@ -300,7 +300,7 @@ _obstack_newchunk (struct obstack *h, int length)
/* If the object just copied was the only data in OLD_CHUNK,
free that chunk and remove it from the chain.
But not if that chunk might contain an empty object. */
if (! h->maybe_empty_object
if (!h->maybe_empty_object
&& (h->object_base
== __PTR_ALIGN ((char *) old_chunk, old_chunk->contents,
h->alignment_mask)))
@ -343,7 +343,7 @@ _obstack_allocated_p (struct obstack *h, void *obj)
}
return lp != 0;
}
/* Free objects in obstack H, including OBJ and everything allocated
more recently than OBJ. If OBJ is zero, free everything in H. */
@ -384,11 +384,11 @@ obstack_free (struct obstack *h, void *obj)
called by non-GCC compilers. */
strong_alias (obstack_free, _obstack_free)
# endif
int
_obstack_memory_used (struct obstack *h)
{
struct _obstack_chunk* lp;
struct _obstack_chunk *lp;
int nbytes = 0;
for (lp = h->chunk; lp != 0; lp = lp->prev)
@ -397,7 +397,7 @@ _obstack_memory_used (struct obstack *h)
}
return nbytes;
}
/* Define the error handler. */
# ifdef _LIBC
# include <libintl.h>
@ -429,11 +429,10 @@ print_and_abort (void)
like this and the translation should be reused instead of creating
a very similar string which requires a separate translation. */
# ifdef _LIBC
(void) __fxprintf (NULL, "%s\n", _("memory exhausted"));
(void) __fxprintf (NULL, "%s\n", _ ("memory exhausted"));
# else
fprintf (stderr, "%s\n", _("memory exhausted"));
fprintf (stderr, "%s\n", _ ("memory exhausted"));
# endif
exit (obstack_exit_failure);
}
#endif /* !ELIDE_CODE */
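
The CALL_CHUNKFUN/CALL_FREEFUN rewrites earlier in this file are pure spacing, but the pattern is worth spelling out: obstack stores a single function pointer in the two-argument type and uses the use_extra_arg flag to decide whether to cast it back to the one-argument type it really has before calling. Calling a function pointer is only safe through its true type, which is exactly what the cast restores. A standalone illustration (struct allocator, plain_alloc, and call_chunkfun are hypothetical names, not glibc code):

#include <stdio.h>
#include <stdlib.h>

struct allocator
{
  void *(*chunkfun) (void *, long); /* Stored as the two-arg type.  */
  void *extra_arg;
  unsigned use_extra_arg : 1;
};

static void *
plain_alloc (long size)
{
  return malloc (size);
}

static void *
call_chunkfun (struct allocator *h, long size)
{
  return h->use_extra_arg
         ? (*h->chunkfun) (h->extra_arg, size)
         /* Cast back to the one-argument type it really has.  */
         : (*(void *(*) (long)) h->chunkfun) (size);
}

int
main (void)
{
  struct allocator a = { (void *(*) (void *, long)) plain_alloc, NULL, 0 };
  void *p = call_chunkfun (&a, 64);
  printf ("%p\n", p);
  free (p);
  return 0;
}
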

malloc/obstack.h View File

@ -18,73 +18,73 @@
/* Summary:
All the apparent functions defined here are macros. The idea
is that you would use these pre-tested macros to solve a
very specific set of problems, and they would run fast.
Caution: no side-effects in arguments please!! They may be
evaluated MANY times!!
All the apparent functions defined here are macros. The idea
is that you would use these pre-tested macros to solve a
very specific set of problems, and they would run fast.
Caution: no side-effects in arguments please!! They may be
evaluated MANY times!!
These macros operate a stack of objects. Each object starts life
small, and may grow to maturity. (Consider building a word syllable
by syllable.) An object can move while it is growing. Once it has
been "finished" it never changes address again. So the "top of the
stack" is typically an immature growing object, while the rest of the
stack is of mature, fixed size and fixed address objects.
These macros operate a stack of objects. Each object starts life
small, and may grow to maturity. (Consider building a word syllable
by syllable.) An object can move while it is growing. Once it has
been "finished" it never changes address again. So the "top of the
stack" is typically an immature growing object, while the rest of the
stack is of mature, fixed size and fixed address objects.
These routines grab large chunks of memory, using a function you
supply, called `obstack_chunk_alloc'. On occasion, they free chunks,
by calling `obstack_chunk_free'. You must define them and declare
them before using any obstack macros.
These routines grab large chunks of memory, using a function you
supply, called `obstack_chunk_alloc'. On occasion, they free chunks,
by calling `obstack_chunk_free'. You must define them and declare
them before using any obstack macros.
Each independent stack is represented by a `struct obstack'.
Each of the obstack macros expects a pointer to such a structure
as the first argument.
Each independent stack is represented by a `struct obstack'.
Each of the obstack macros expects a pointer to such a structure
as the first argument.
One motivation for this package is the problem of growing char strings
in symbol tables. Unless you are "fascist pig with a read-only mind"
--Gosper's immortal quote from HAKMEM item 154, out of context--you
would not like to put any arbitrary upper limit on the length of your
symbols.
One motivation for this package is the problem of growing char strings
in symbol tables. Unless you are "fascist pig with a read-only mind"
--Gosper's immortal quote from HAKMEM item 154, out of context--you
would not like to put any arbitrary upper limit on the length of your
symbols.
In practice this often means you will build many short symbols and a
few long symbols. At the time you are reading a symbol you don't know
how long it is. One traditional method is to read a symbol into a
buffer, realloc()ating the buffer every time you try to read a symbol
that is longer than the buffer. This is beaut, but you still will
want to copy the symbol from the buffer to a more permanent
symbol-table entry say about half the time.
In practice this often means you will build many short symbols and a
few long symbols. At the time you are reading a symbol you don't know
how long it is. One traditional method is to read a symbol into a
buffer, realloc()ating the buffer every time you try to read a symbol
that is longer than the buffer. This is beaut, but you still will
want to copy the symbol from the buffer to a more permanent
symbol-table entry say about half the time.
With obstacks, you can work differently. Use one obstack for all symbol
names. As you read a symbol, grow the name in the obstack gradually.
When the name is complete, finalize it. Then, if the symbol exists already,
free the newly read name.
With obstacks, you can work differently. Use one obstack for all symbol
names. As you read a symbol, grow the name in the obstack gradually.
When the name is complete, finalize it. Then, if the symbol exists already,
free the newly read name.
The way we do this is to take a large chunk, allocating memory from
low addresses. When you want to build a symbol in the chunk you just
add chars above the current "high water mark" in the chunk. When you
have finished adding chars, because you got to the end of the symbol,
you know how long the chars are, and you can create a new object.
Mostly the chars will not burst over the highest address of the chunk,
because you would typically expect a chunk to be (say) 100 times as
long as an average object.
The way we do this is to take a large chunk, allocating memory from
low addresses. When you want to build a symbol in the chunk you just
add chars above the current "high water mark" in the chunk. When you
have finished adding chars, because you got to the end of the symbol,
you know how long the chars are, and you can create a new object.
Mostly the chars will not burst over the highest address of the chunk,
because you would typically expect a chunk to be (say) 100 times as
long as an average object.
In case that isn't clear, when we have enough chars to make up
the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
so we just point to it where it lies. No moving of chars is
needed and this is the second win: potentially long strings need
never be explicitly shuffled. Once an object is formed, it does not
change its address during its lifetime.
In case that isn't clear, when we have enough chars to make up
the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
so we just point to it where it lies. No moving of chars is
needed and this is the second win: potentially long strings need
never be explicitly shuffled. Once an object is formed, it does not
change its address during its lifetime.
When the chars burst over a chunk boundary, we allocate a larger
chunk, and then copy the partly formed object from the end of the old
chunk to the beginning of the new larger chunk. We then carry on
accreting characters to the end of the object as we normally would.
When the chars burst over a chunk boundary, we allocate a larger
chunk, and then copy the partly formed object from the end of the old
chunk to the beginning of the new larger chunk. We then carry on
accreting characters to the end of the object as we normally would.
A special macro is provided to add a single char at a time to a
growing object. This allows the use of register variables, which
break the ordinary 'growth' macro.
A special macro is provided to add a single char at a time to a
growing object. This allows the use of register variables, which
break the ordinary 'growth' macro.
Summary:
Summary:
We allocate large chunks.
We carve out one object at a time from the current chunk.
Once carved, an object never moves.
@ -96,7 +96,7 @@ Summary:
Because of the way we do it, you can `unwind' an obstack
back to a previous state. (You may remove objects much
as you would with a stack.)
*/
*/
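
The reindented summary reads more easily next to a concrete use; a minimal sketch assuming glibc's <obstack.h>, with the obstack_chunk_alloc/obstack_chunk_free definitions being the user obligation the comment mentions:

/* Grow a string piecewise in an obstack, then finish it.  */
#include <obstack.h>
#include <stdio.h>
#include <stdlib.h>

#define obstack_chunk_alloc malloc
#define obstack_chunk_free free

int
main (void)
{
  struct obstack ob;
  obstack_init (&ob);

  /* The growing object may still move until it is finished.  */
  obstack_grow (&ob, "hello, ", 7);
  obstack_grow0 (&ob, "obstack", 7);   /* grow0 appends a NUL.  */

  char *s = obstack_finish (&ob);      /* Address is now stable.  */
  puts (s);

  obstack_free (&ob, NULL);            /* Free everything in the stack.  */
  return 0;
}
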
/* Don't do the contents of this file more than once. */
@ -107,7 +107,7 @@ Summary:
#ifdef __cplusplus
extern "C" {
#endif
/* We need the type of a pointer subtraction. If __PTRDIFF_TYPE__ is
defined, as with GNU C, use that; that way we don't pollute the
namespace with <stddef.h>'s symbols. Otherwise, include <stddef.h>
@ -124,7 +124,7 @@ extern "C" {
aligning P to the next multiple of A + 1. B and P must be of type
char *. A + 1 must be a power of 2. */
#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))
#define __BPTR_ALIGN(B, P, A) ((B) + (((P) -(B) + (A)) & ~(A)))
/* Similar to _BPTR_ALIGN (B, P, A), except optimize the common case
where pointers can be converted to integers, aligned as integers,
@ -165,12 +165,12 @@ struct obstack /* control current object in current chunk */
struct _obstack_chunk *(*chunkfun) (void *, long);
void (*freefun) (void *, struct _obstack_chunk *);
void *extra_arg; /* first arg for chunk alloc/dealloc funcs */
unsigned use_extra_arg:1; /* chunk alloc/dealloc funcs take extra arg */
unsigned maybe_empty_object:1;/* There is a possibility that the current
unsigned use_extra_arg : 1; /* chunk alloc/dealloc funcs take extra arg */
unsigned maybe_empty_object : 1; /* There is a possibility that the current
chunk contains a zero-length object. This
prevents freeing the chunk if we allocate
a bigger chunk to replace it. */
unsigned alloc_failed:1; /* No longer used, as we now call the failed
unsigned alloc_failed : 1; /* No longer used, as we now call the failed
handler on error, but retained for binary
compatibility. */
};
@ -179,15 +179,15 @@ struct obstack /* control current object in current chunk */
extern void _obstack_newchunk (struct obstack *, int);
extern int _obstack_begin (struct obstack *, int, int,
void *(*) (long), void (*) (void *));
void *(*)(long), void (*)(void *));
extern int _obstack_begin_1 (struct obstack *, int, int,
void *(*) (void *, long),
void (*) (void *, void *), void *);
void *(*)(void *, long),
void (*)(void *, void *), void *);
extern int _obstack_memory_used (struct obstack *);
void obstack_free (struct obstack *__obstack, void *__glibc_block);
/* Error handler called when `obstack_chunk_alloc' failed to allocate
more memory. This can be set to a user defined function which
should either abort gracefully or use longjump - but shouldn't
@ -196,7 +196,7 @@ extern void (*obstack_alloc_failed_handler) (void);
/* Exit value used when `print_and_abort' is used. */
extern int obstack_exit_failure;
/* Pointer to beginning of object being allocated or to be allocated next.
Note that this might not be the final address of the object
because a new chunk might be needed to hold the final size. */
@ -218,36 +218,36 @@ extern int obstack_exit_failure;
/* To prevent prototype warnings provide complete argument list. */
#define obstack_init(h) \
_obstack_begin ((h), 0, 0, \
(void *(*) (long)) obstack_chunk_alloc, \
(void (*) (void *)) obstack_chunk_free)
(void *(*)(long))obstack_chunk_alloc, \
(void (*)(void *))obstack_chunk_free)
#define obstack_begin(h, size) \
_obstack_begin ((h), (size), 0, \
(void *(*) (long)) obstack_chunk_alloc, \
(void (*) (void *)) obstack_chunk_free)
(void *(*)(long))obstack_chunk_alloc, \
(void (*)(void *))obstack_chunk_free)
#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \
_obstack_begin ((h), (size), (alignment), \
(void *(*) (long)) (chunkfun), \
(void (*) (void *)) (freefun))
(void *(*)(long))(chunkfun), \
(void (*)(void *))(freefun))
#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \
_obstack_begin_1 ((h), (size), (alignment), \
(void *(*) (void *, long)) (chunkfun), \
(void (*) (void *, void *)) (freefun), (arg))
(void *(*)(void *, long))(chunkfun), \
(void (*)(void *, void *))(freefun), (arg))
#define obstack_chunkfun(h, newchunkfun) \
((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun))
((h)->chunkfun = (struct _obstack_chunk *(*)(void *, long))(newchunkfun))
#define obstack_freefun(h, newfreefun) \
((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun))
((h)->freefun = (void (*)(void *, struct _obstack_chunk *))(newfreefun))
#define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar))
#define obstack_1grow_fast(h, achar) (*((h)->next_free)++ = (achar))
#define obstack_blank_fast(h,n) ((h)->next_free += (n))
#define obstack_blank_fast(h, n) ((h)->next_free += (n))
#define obstack_memory_used(h) _obstack_memory_used (h)
#if defined __GNUC__
/* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and
does not implement __extension__. But that compiler doesn't define
@ -271,9 +271,9 @@ extern int obstack_exit_failure;
({ struct obstack const *__o = (OBSTACK); \
(unsigned) (__o->chunk_limit - __o->next_free); })
# define obstack_make_room(OBSTACK,length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_make_room(OBSTACK, length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
int __len = (length); \
if (__o->chunk_limit - __o->next_free < __len) \
_obstack_newchunk (__o, __len); \
@ -287,9 +287,9 @@ __extension__ \
__o->chunk->contents, \
__o->alignment_mask)); })
# define obstack_grow(OBSTACK,where,length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_grow(OBSTACK, where, length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
int __len = (length); \
if (__o->next_free + __len > __o->chunk_limit) \
_obstack_newchunk (__o, __len); \
@ -297,9 +297,9 @@ __extension__ \
__o->next_free += __len; \
(void) 0; })
# define obstack_grow0(OBSTACK,where,length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_grow0(OBSTACK, where, length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
int __len = (length); \
if (__o->next_free + __len + 1 > __o->chunk_limit) \
_obstack_newchunk (__o, __len + 1); \
@ -308,9 +308,9 @@ __extension__ \
*(__o->next_free)++ = 0; \
(void) 0; })
# define obstack_1grow(OBSTACK,datum) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_1grow(OBSTACK, datum) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
if (__o->next_free + 1 > __o->chunk_limit) \
_obstack_newchunk (__o, 1); \
obstack_1grow_fast (__o, datum); \
@ -320,86 +320,86 @@ __extension__ \
or ints, and that the data added so far to the current object
shares that much alignment. */
# define obstack_ptr_grow(OBSTACK,datum) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_ptr_grow(OBSTACK, datum) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
if (__o->next_free + sizeof (void *) > __o->chunk_limit) \
_obstack_newchunk (__o, sizeof (void *)); \
obstack_ptr_grow_fast (__o, datum); }) \
# define obstack_int_grow(OBSTACK,datum) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_int_grow(OBSTACK, datum) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
if (__o->next_free + sizeof (int) > __o->chunk_limit) \
_obstack_newchunk (__o, sizeof (int)); \
obstack_int_grow_fast (__o, datum); })
# define obstack_ptr_grow_fast(OBSTACK,aptr) \
__extension__ \
({ struct obstack *__o1 = (OBSTACK); \
# define obstack_ptr_grow_fast(OBSTACK, aptr) \
__extension__ \
({ struct obstack *__o1 = (OBSTACK); \
*(const void **) __o1->next_free = (aptr); \
__o1->next_free += sizeof (const void *); \
(void) 0; })
# define obstack_int_grow_fast(OBSTACK,aint) \
__extension__ \
({ struct obstack *__o1 = (OBSTACK); \
# define obstack_int_grow_fast(OBSTACK, aint) \
__extension__ \
({ struct obstack *__o1 = (OBSTACK); \
*(int *) __o1->next_free = (aint); \
__o1->next_free += sizeof (int); \
(void) 0; })
# define obstack_blank(OBSTACK,length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
# define obstack_blank(OBSTACK, length) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
int __len = (length); \
if (__o->chunk_limit - __o->next_free < __len) \
_obstack_newchunk (__o, __len); \
obstack_blank_fast (__o, __len); \
(void) 0; })
# define obstack_alloc(OBSTACK,length) \
__extension__ \
({ struct obstack *__h = (OBSTACK); \
# define obstack_alloc(OBSTACK, length) \
__extension__ \
({ struct obstack *__h = (OBSTACK); \
obstack_blank (__h, (length)); \
obstack_finish (__h); })
# define obstack_copy(OBSTACK,where,length) \
__extension__ \
({ struct obstack *__h = (OBSTACK); \
# define obstack_copy(OBSTACK, where, length) \
__extension__ \
({ struct obstack *__h = (OBSTACK); \
obstack_grow (__h, (where), (length)); \
obstack_finish (__h); })
# define obstack_copy0(OBSTACK,where,length) \
__extension__ \
({ struct obstack *__h = (OBSTACK); \
# define obstack_copy0(OBSTACK, where, length) \
__extension__ \
({ struct obstack *__h = (OBSTACK); \
obstack_grow0 (__h, (where), (length)); \
obstack_finish (__h); })
/* The local variable is named __o1 to avoid a name conflict
when obstack_blank is called. */
# define obstack_finish(OBSTACK) \
__extension__ \
({ struct obstack *__o1 = (OBSTACK); \
__extension__ \
({ struct obstack *__o1 = (OBSTACK); \
void *__value = (void *) __o1->object_base; \
if (__o1->next_free == __value) \
__o1->maybe_empty_object = 1; \
__o1->next_free \
= __PTR_ALIGN (__o1->object_base, __o1->next_free, \
__o1->alignment_mask); \
if (__o1->next_free - (char *)__o1->chunk \
> __o1->chunk_limit - (char *)__o1->chunk) \
if (__o1->next_free - (char *) __o1->chunk \
> __o1->chunk_limit - (char *) __o1->chunk) \
__o1->next_free = __o1->chunk_limit; \
__o1->object_base = __o1->next_free; \
__value; })
# define obstack_free(OBSTACK, OBJ) \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
__extension__ \
({ struct obstack *__o = (OBSTACK); \
void *__obj = (OBJ); \
if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit) \
__o->next_free = __o->object_base = (char *)__obj; \
if (__obj > (void *) __o->chunk && __obj < (void *) __o->chunk_limit) \
__o->next_free = __o->object_base = (char *) __obj; \
else (obstack_free) (__o, __obj); })
#else /* not __GNUC__ */
# define obstack_object_size(h) \
@ -420,64 +420,64 @@ __extension__ \
Casting the third operand to void was tried before,
but some compilers won't accept it. */
# define obstack_make_room(h,length) \
( (h)->temp.tempint = (length), \
# define obstack_make_room(h, length) \
((h)->temp.tempint = (length), \
(((h)->next_free + (h)->temp.tempint > (h)->chunk_limit) \
? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0))
# define obstack_grow(h,where,length) \
( (h)->temp.tempint = (length), \
# define obstack_grow(h, where, length) \
((h)->temp.tempint = (length), \
(((h)->next_free + (h)->temp.tempint > (h)->chunk_limit) \
? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0), \
memcpy ((h)->next_free, where, (h)->temp.tempint), \
(h)->next_free += (h)->temp.tempint)
# define obstack_grow0(h,where,length) \
( (h)->temp.tempint = (length), \
# define obstack_grow0(h, where, length) \
((h)->temp.tempint = (length), \
(((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit) \
? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0), \
memcpy ((h)->next_free, where, (h)->temp.tempint), \
(h)->next_free += (h)->temp.tempint, \
*((h)->next_free)++ = 0)
# define obstack_1grow(h,datum) \
( (((h)->next_free + 1 > (h)->chunk_limit) \
# define obstack_1grow(h, datum) \
((((h)->next_free + 1 > (h)->chunk_limit) \
? (_obstack_newchunk ((h), 1), 0) : 0), \
obstack_1grow_fast (h, datum))
# define obstack_ptr_grow(h,datum) \
( (((h)->next_free + sizeof (char *) > (h)->chunk_limit) \
# define obstack_ptr_grow(h, datum) \
((((h)->next_free + sizeof (char *) > (h)->chunk_limit) \
? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0), \
obstack_ptr_grow_fast (h, datum))
# define obstack_int_grow(h,datum) \
( (((h)->next_free + sizeof (int) > (h)->chunk_limit) \
# define obstack_int_grow(h, datum) \
((((h)->next_free + sizeof (int) > (h)->chunk_limit) \
? (_obstack_newchunk ((h), sizeof (int)), 0) : 0), \
obstack_int_grow_fast (h, datum))
# define obstack_ptr_grow_fast(h,aptr) \
# define obstack_ptr_grow_fast(h, aptr) \
(((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr))
# define obstack_int_grow_fast(h,aint) \
# define obstack_int_grow_fast(h, aint) \
(((int *) ((h)->next_free += sizeof (int)))[-1] = (aint))
# define obstack_blank(h,length) \
( (h)->temp.tempint = (length), \
# define obstack_blank(h, length) \
((h)->temp.tempint = (length), \
(((h)->chunk_limit - (h)->next_free < (h)->temp.tempint) \
? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0), \
obstack_blank_fast (h, (h)->temp.tempint))
# define obstack_alloc(h,length) \
# define obstack_alloc(h, length) \
(obstack_blank ((h), (length)), obstack_finish ((h)))
# define obstack_copy(h,where,length) \
# define obstack_copy(h, where, length) \
(obstack_grow ((h), (where), (length)), obstack_finish ((h)))
# define obstack_copy0(h,where,length) \
# define obstack_copy0(h, where, length) \
(obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
# define obstack_finish(h) \
( ((h)->next_free == (h)->object_base \
(((h)->next_free == (h)->object_base \
? (((h)->maybe_empty_object = 1), 0) \
: 0), \
(h)->temp.tempptr = (h)->object_base, \
@ -490,18 +490,16 @@ __extension__ \
(h)->object_base = (h)->next_free, \
(h)->temp.tempptr)
# define obstack_free(h,obj) \
( (h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk, \
# define obstack_free(h, obj) \
((h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk, \
((((h)->temp.tempint > 0 \
&& (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk)) \
? (((h)->next_free = (h)->object_base \
= (h)->temp.tempint + (char *) (h)->chunk), 0) \
: ((obstack_free) ((h), (h)->temp.tempint + (char *) (h)->chunk), 0)))
#endif /* not __GNUC__ */
#ifdef __cplusplus
} /* C++ */
#endif
#endif /* obstack.h */

malloc/set-freeres.c View File

@ -33,16 +33,16 @@ __libc_freeres (void)
protect for multiple executions since these are fatal. */
static long int already_called;
if (! atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
if (!atomic_compare_and_exchange_bool_acq (&already_called, 1, 0))
{
void * const *p;
void *const *p;
_IO_cleanup ();
RUN_HOOK (__libc_subfreeres, ());
for (p = symbol_set_first_element (__libc_freeres_ptrs);
! symbol_set_end_p (__libc_freeres_ptrs, p); ++p)
!symbol_set_end_p (__libc_freeres_ptrs, p); ++p)
free (*p);
}
}
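
The already_called exchange above is a run-once guard. A portable C11 rendering of the same shape, for comparison only; glibc itself uses its internal atomic_compare_and_exchange_bool_acq macro, whose return convention is inverted relative to C11's (it yields false when the exchange succeeded). The function name freeres_once is hypothetical:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long already_called;

void
freeres_once (void)
{
  long expected = 0;
  /* Only the first caller observes a successful exchange.  */
  if (atomic_compare_exchange_strong (&already_called, &expected, 1))
    puts ("releasing resources exactly once");
}
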

malloc/tst-mallocstate.c View File

@ -48,7 +48,7 @@ main (void)
free (malloc (10));
for (i=0; i<100; ++i)
for (i = 0; i < 100; ++i)
{
save_state = malloc_get_state ();
if (save_state == NULL)
@ -58,7 +58,7 @@ main (void)
}
/*free (malloc (10)); This could change the top chunk! */
malloc_set_state (save_state);
p1 = realloc (p1, i*4 + 4);
p1 = realloc (p1, i * 4 + 4);
if (p1 == NULL)
merror ("realloc (i*4) failed.");
free (save_state);

malloc/tst-mtrace.c View File

@ -65,7 +65,7 @@ main (void)
abort ();
p = (char **) tsearch (copy, &root,
(int (*) (const void *, const void *)) strcmp);
(int (*)(const void *, const void *))strcmp);
if (*p != copy)
/* This line wasn't added. */
free (copy);