2002-01-18  Wolfram Gloger  <wg@malloc.de>

	* malloc/malloc.c: Rewrite, adapted from Doug Lea's malloc-2.7.0.c.
	* malloc/malloc.h: Likewise.
	* malloc/arena.c: New file.
	* malloc/hooks.c: New file.
	* malloc/tst-mallocstate.c: New file.
	* malloc/Makefile: Add new testcase tst-mallocstate.
	Add arena.c and hooks.c to distribute.  Fix commented CPPFLAGS.

2002-01-28  Ulrich Drepper  <drepper@redhat.com>

	* stdlib/msort.c: Remove last patch.  The optimization violates the
	same rule which qsort.c had problems with.

2002-01-27  Paul Eggert  <eggert@twinsun.com>

	* stdlib/qsort.c (_quicksort): Do not apply the comparison function
	to a pivot element that lies outside the array to be sorted, as
	ISO C99 requires that the comparison function be called only with
	addresses of array elements [PR libc/2880].
Committed by Ulrich Drepper, 2002-01-29 07:54:51 +00:00
commit fa8d436c87 (parent db2ebcef24)
9 changed files with 6029 additions and 4592 deletions

ChangeLog:

@@ -1,3 +1,25 @@
2002-01-18 Wolfram Gloger <wg@malloc.de>
* malloc/malloc.c: Rewrite, adapted from Doug Lea's malloc-2.7.0.c.
* malloc/malloc.h: Likewise.
* malloc/arena.c: New file.
* malloc/hooks.c: New file.
* malloc/tst-mallocstate.c: New file.
* malloc/Makefile: Add new testcase tst-mallocstate.
Add arena.c and hooks.c to distribute. Fix commented CPPFLAGS.
2002-01-28 Ulrich Drepper <drepper@redhat.com>
* stdlib/msort.c: Remove last patch. The optimization violates the
same rule which qsort.c had problems with.
2002-01-27 Paul Eggert <eggert@twinsun.com>
* stdlib/qsort.c (_quicksort): Do not apply the comparison function
to a pivot element that lies outside the array to be sorted, as
ISO C99 requires that the comparison function be called only with
addresses of array elements [PR libc/2880].
2002-01-28 Ulrich Drepper <drepper@redhat.com>
* elf/dl-load.c (_dl_map_object): Remove incorrect optimization

malloc/Makefile:

@@ -1,4 +1,4 @@
# Copyright (C) 1991-1999, 2000, 2001 Free Software Foundation, Inc.
# Copyright (C) 1991-1999, 2000, 2001, 2002 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
@@ -25,11 +25,12 @@ all:
dist-headers := malloc.h
headers := $(dist-headers) obstack.h mcheck.h
tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack
tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
tst-mallocstate
test-srcs = tst-mtrace
distribute = thread-m.h mtrace.pl mcheck-init.c stackinfo.h memusage.h \
memusage.sh memusagestat.c tst-mtrace.sh
memusage.sh memusagestat.c tst-mtrace.sh arena.c hooks.c
# Things which get pasted together into gmalloc.c.
gmalloc-routines := malloc morecore
@@ -109,7 +110,7 @@ endif
endif
# Uncomment this for test releases. For public releases it is too expensive.
#CPPFLAGS-malloc.o += -DMALLOC_DEBUG
#CPPFLAGS-malloc.o += -DMALLOC_DEBUG=1
$(objpfx)mtrace: mtrace.pl
rm -f $@.new
@@ -126,3 +127,6 @@ $(objpfx)memusage: memusage.sh
# The implementation uses `dlsym'
$(objpfx)libmemusage.so: $(common-objpfx)dlfcn/libdl.so
# Extra dependencies
$(foreach o,$(all-object-suffixes),$(objpfx)malloc$(o)): arena.c hooks.c

malloc/arena.c (new file, 746 lines):

@@ -0,0 +1,746 @@
/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* $Id$ */
/* Compile-time constants. */
#define HEAP_MIN_SIZE (32*1024)
#ifndef HEAP_MAX_SIZE
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
#endif
/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
that are dynamically created for multi-threaded programs. The
maximum size must be a power of two, for fast determination of
which heap belongs to a chunk. It should be much larger than the
mmap threshold, so that requests with a size just below that
threshold can be fulfilled without creating too many heaps. */
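/* Illustration only (not part of the patch; the demo_* names are made
   up): because each heap is mmap()ed at an address aligned to
   HEAP_MAX_SIZE, the heap owning any chunk can be recovered with a
   single mask -- the trick heap_for_ptr() below depends on. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#define DEMO_HEAP_MAX_SIZE ((uintptr_t)(1024*1024)) /* power of two */
static uintptr_t
demo_heap_for_ptr(uintptr_t ptr)
{
  return ptr & ~(DEMO_HEAP_MAX_SIZE - 1);
}
int
main(void)
{
  /* Pretend a heap was mapped at this HEAP_MAX_SIZE-aligned address. */
  uintptr_t heap_base = 16 * DEMO_HEAP_MAX_SIZE;
  uintptr_t chunk = heap_base + 40000;  /* any offset inside the heap */
  assert(demo_heap_for_ptr(chunk) == heap_base);
  printf("chunk %#lx lies in the heap at %#lx\n",
         (unsigned long)chunk, (unsigned long)heap_base);
  return 0;
}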
#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif
/* If THREAD_STATS is non-zero, some statistics on mutex locking are
computed. */
/***************************************************************************/
#define top(ar_ptr) ((ar_ptr)->top)
/* A heap is a single contiguous memory region holding (coalesceable)
malloc_chunks. It is allocated with mmap() and always starts at an
address aligned to HEAP_MAX_SIZE. Not used unless compiling with
USE_ARENAS. */
typedef struct _heap_info {
mstate ar_ptr; /* Arena for this heap. */
struct _heap_info *prev; /* Previous heap. */
size_t size; /* Current size in bytes. */
size_t pad; /* Make sure the following data is properly aligned. */
} heap_info;
/* Thread specific data */
static tsd_key_t arena_key;
static mutex_t list_lock;
#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif
/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;
/**************************************************************************/
#if USE_ARENAS
/* arena_get() acquires an arena and locks the corresponding mutex.
First, try the one last locked successfully by this thread. (This
is the common case and handled with a macro for speed.) Then, loop
once over the circularly linked list of arenas. If no arena is
readily available, create a new one. In this latter case, `size'
is just a hint as to how much memory will be required immediately
in the new arena. */
#define arena_get(ptr, size) do { \
Void_t *vptr = NULL; \
ptr = (mstate)tsd_getspecific(arena_key, vptr); \
if(ptr && !mutex_trylock(&ptr->mutex)) { \
THREAD_STAT(++(ptr->stat_lock_direct)); \
} else \
ptr = arena_get2(ptr, (size)); \
} while(0)
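/* Rough standalone sketch (plain POSIX threads; demo_* names invented)
   of the strategy described above: try the last-used arena without
   blocking, then loop once over the circular list with trylock, and only
   block -- or, in the real arena_get2(), create a new arena -- as a last
   resort. Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
struct demo_arena {
  pthread_mutex_t mutex;
  struct demo_arena *next;      /* circular list, like main_arena.next */
};
static struct demo_arena demo_a1 = { PTHREAD_MUTEX_INITIALIZER, NULL };
static struct demo_arena demo_a2 = { PTHREAD_MUTEX_INITIALIZER, NULL };
static struct demo_arena *
demo_arena_get(struct demo_arena *last_used)
{
  struct demo_arena *a = last_used;
  if(pthread_mutex_trylock(&a->mutex) == 0)    /* fast path */
    return a;
  for(a = last_used->next; a != last_used; a = a->next)
    if(pthread_mutex_trylock(&a->mutex) == 0)  /* loop once over the list */
      return a;
  /* Nothing free; block (arena_get2() would create a new arena here). */
  pthread_mutex_lock(&last_used->mutex);
  return last_used;
}
int
main(void)
{
  demo_a1.next = &demo_a2;
  demo_a2.next = &demo_a1;
  pthread_mutex_lock(&demo_a1.mutex);          /* pretend arena 1 is busy */
  struct demo_arena *got = demo_arena_get(&demo_a1);
  printf("got %s\n", got == &demo_a2 ? "arena 2" : "arena 1");
  pthread_mutex_unlock(&got->mutex);
  pthread_mutex_unlock(&demo_a1.mutex);
  return 0;
}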
/* find the heap and corresponding arena for a given ptr */
#define heap_for_ptr(ptr) \
((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_chunk(ptr) \
(chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
#else /* !USE_ARENAS */
/* There is only one arena, main_arena. */
#if THREAD_STATS
#define arena_get(ar_ptr, sz) do { \
ar_ptr = &main_arena; \
if(!mutex_trylock(&ar_ptr->mutex)) \
++(ar_ptr->stat_lock_direct); \
else { \
(void)mutex_lock(&ar_ptr->mutex); \
++(ar_ptr->stat_lock_wait); \
} \
} while(0)
#else
#define arena_get(ar_ptr, sz) do { \
ar_ptr = &main_arena; \
(void)mutex_lock(&ar_ptr->mutex); \
} while(0)
#endif
#define arena_for_chunk(ptr) (&main_arena)
#endif /* USE_ARENAS */
/**************************************************************************/
#ifndef NO_THREADS
/* atfork support. */
static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
__const __malloc_ptr_t));
static void (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
__const __malloc_ptr_t));
static Void_t* save_arena;
/* Magic value for the thread-specific arena pointer when
malloc_atfork() is in use. */
#define ATFORK_ARENA_PTR ((Void_t*)-1)
/* The following hooks are used while the `atfork' handling mechanism
is active. */
static Void_t*
malloc_atfork(size_t sz, const Void_t *caller)
{
Void_t *vptr = NULL;
Void_t *victim;
tsd_getspecific(arena_key, vptr);
if(vptr == ATFORK_ARENA_PTR) {
/* We are the only thread that may allocate at all. */
if(save_malloc_hook != malloc_check) {
return _int_malloc(&main_arena, sz);
} else {
if(top_check()<0)
return 0;
victim = _int_malloc(&main_arena, sz+1);
return mem2mem_check(victim, sz);
}
} else {
/* Suspend the thread until the `atfork' handlers have completed.
By that time, the hooks will have been reset as well, so that
mALLOc() can be used again. */
(void)mutex_lock(&list_lock);
(void)mutex_unlock(&list_lock);
return public_mALLOc(sz);
}
}
static void
free_atfork(Void_t* mem, const Void_t *caller)
{
Void_t *vptr = NULL;
mstate ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
if (mem == 0) /* free(0) has no effect */
return;
p = mem2chunk(mem); /* do not bother to replicate free_check here */
#if HAVE_MMAP
if (chunk_is_mmapped(p)) /* release mmapped memory. */
{
munmap_chunk(p);
return;
}
#endif
ar_ptr = arena_for_chunk(p);
tsd_getspecific(arena_key, vptr);
if(vptr != ATFORK_ARENA_PTR)
(void)mutex_lock(&ar_ptr->mutex);
_int_free(ar_ptr, mem);
if(vptr != ATFORK_ARENA_PTR)
(void)mutex_unlock(&ar_ptr->mutex);
}
/* The following two functions are registered via thread_atfork() to
make sure that the mutexes remain in a consistent state in the
fork()ed version of a thread. Also adapt the malloc and free hooks
temporarily, because the `atfork' handler mechanism may use
malloc/free internally (e.g. in LinuxThreads). */
static void
ptmalloc_lock_all __MALLOC_P((void))
{
mstate ar_ptr;
(void)mutex_lock(&list_lock);
for(ar_ptr = &main_arena;;) {
(void)mutex_lock(&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
save_malloc_hook = __malloc_hook;
save_free_hook = __free_hook;
__malloc_hook = malloc_atfork;
__free_hook = free_atfork;
/* Only the current thread may perform malloc/free calls now. */
tsd_getspecific(arena_key, save_arena);
tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}
static void
ptmalloc_unlock_all __MALLOC_P((void))
{
mstate ar_ptr;
tsd_setspecific(arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
for(ar_ptr = &main_arena;;) {
(void)mutex_unlock(&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
(void)mutex_unlock(&list_lock);
}
#ifdef __linux__
/* In LinuxThreads, unlocking a mutex in the child process after a
fork() is currently unsafe, whereas re-initializing it is safe and
does not leak resources. Therefore, a special atfork handler is
installed for the child. */
static void
ptmalloc_unlock_all2 __MALLOC_P((void))
{
mstate ar_ptr;
#if defined _LIBC || defined MALLOC_HOOKS
tsd_setspecific(arena_key, save_arena);
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
#endif
for(ar_ptr = &main_arena;;) {
(void)mutex_init(&ar_ptr->mutex);
ar_ptr = ar_ptr->next;
if(ar_ptr == &main_arena) break;
}
(void)mutex_init(&list_lock);
}
#else
#define ptmalloc_unlock_all2 ptmalloc_unlock_all
#endif
#endif /* !defined NO_THREADS */
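/* Self-contained sketch of the same fork protection using plain
   pthread_atfork() instead of the thread_atfork()/mutex_* wrappers
   (demo_* names are invented): the prepare handler takes the lock, the
   parent handler releases it, and the child re-initializes it instead of
   unlocking, mirroring ptmalloc_lock_all(), ptmalloc_unlock_all() and
   ptmalloc_unlock_all2() above. Compile with -pthread. */
#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static void demo_prepare(void) { pthread_mutex_lock(&demo_lock); }
static void demo_parent(void)  { pthread_mutex_unlock(&demo_lock); }
static void demo_child(void)
{
  /* Unlocking in the child may be unsafe with some thread libraries
     (see the LinuxThreads note above), so re-initialize instead. */
  pthread_mutex_init(&demo_lock, NULL);
}
int
main(void)
{
  pthread_atfork(demo_prepare, demo_parent, demo_child);
  pid_t pid = fork();
  if(pid == 0) {
    pthread_mutex_lock(&demo_lock);   /* safe: the child got a fresh mutex */
    puts("child: lock acquired");
    pthread_mutex_unlock(&demo_lock);
    _exit(0);
  }
  waitpid(pid, NULL, 0);
  pthread_mutex_lock(&demo_lock);     /* the parent handler released it */
  puts("parent: lock acquired");
  pthread_mutex_unlock(&demo_lock);
  return 0;
}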
/* Already initialized? */
int __malloc_initialized = -1;
/* Initialization routine. */
#ifdef _LIBC
#include <string.h>
extern char **_environ;
static char *
internal_function
next_env_entry (char ***position)
{
char **current = *position;
char *result = NULL;
while (*current != NULL)
{
if (__builtin_expect ((*current)[0] == 'M', 0)
&& (*current)[1] == 'A'
&& (*current)[2] == 'L'
&& (*current)[3] == 'L'
&& (*current)[4] == 'O'
&& (*current)[5] == 'C'
&& (*current)[6] == '_')
{
result = &(*current)[7];
/* Save current position for next visit. */
*position = ++current;
break;
}
++current;
}
return result;
}
#endif /* _LIBC */
static void
ptmalloc_init __MALLOC_P((void))
{
#if __STD_C
const char* s;
#else
char* s;
#endif
int secure = 0;
if(__malloc_initialized >= 0) return;
__malloc_initialized = 0;
mp_.top_pad = DEFAULT_TOP_PAD;
mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
mp_.pagesize = malloc_getpagesize;
#ifndef NO_THREADS
/* With some threads implementations, creating thread-specific data
or initializing a mutex may call malloc() itself. Provide a
simple starter version (realloc() won't work). */
save_malloc_hook = __malloc_hook;
save_free_hook = __free_hook;
__malloc_hook = malloc_starter;
__free_hook = free_starter;
#ifdef _LIBC
/* Initialize the pthreads interface. */
if (__pthread_initialize != NULL)
__pthread_initialize();
#endif
#endif /* !defined NO_THREADS */
mutex_init(&main_arena.mutex);
main_arena.next = &main_arena;
mutex_init(&list_lock);
tsd_key_create(&arena_key, NULL);
tsd_setspecific(arena_key, (Void_t *)&main_arena);
thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
__malloc_hook = save_malloc_hook;
__free_hook = save_free_hook;
#endif
#ifdef _LIBC
secure = __libc_enable_secure;
s = NULL;
{
char **runp = _environ;
char *envline;
while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
0))
{
size_t len = strcspn (envline, "=");
if (envline[len] != '=')
/* This is a "MALLOC_" variable at the end of the string
without a '=' character. Ignore it since otherwise we
will access invalid memory below. */
continue;
switch (len)
{
case 6:
if (memcmp (envline, "CHECK_", 6) == 0)
s = &envline[7];
break;
case 8:
if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
mALLOPt(M_TOP_PAD, atoi(&envline[9]));
break;
case 9:
if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
break;
case 15:
if (! secure)
{
if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
}
break;
default:
break;
}
}
}
#else
if (! secure)
{
if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
mALLOPt(M_TRIM_THRESHOLD, atoi(s));
if((s = getenv("MALLOC_TOP_PAD_")))
mALLOPt(M_TOP_PAD, atoi(s));
if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
mALLOPt(M_MMAP_THRESHOLD, atoi(s));
if((s = getenv("MALLOC_MMAP_MAX_")))
mALLOPt(M_MMAP_MAX, atoi(s));
}
s = getenv("MALLOC_CHECK_");
#endif
if(s) {
if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
__malloc_check_init();
}
if(__malloc_initialize_hook != NULL)
(*__malloc_initialize_hook)();
__malloc_initialized = 1;
}
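/* The tunables read from the MALLOC_* environment variables above can
   also be set by the program itself through the public mallopt()
   interface declared in <malloc.h>; a short sketch (the values are
   arbitrary examples): */
#include <malloc.h>
#include <stdlib.h>
int
main(void)
{
  mallopt(M_TRIM_THRESHOLD, 256*1024);  /* like MALLOC_TRIM_THRESHOLD_ */
  mallopt(M_TOP_PAD, 64*1024);          /* like MALLOC_TOP_PAD_ */
  mallopt(M_MMAP_THRESHOLD, 256*1024);  /* like MALLOC_MMAP_THRESHOLD_ */
  mallopt(M_MMAP_MAX, 1024);            /* like MALLOC_MMAP_MAX_ */
  void *p = malloc(1000);               /* uses the new settings */
  free(p);
  return 0;
}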
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
ptmalloc_unlock_all2)
#endif
/* Managing heaps and arenas (for concurrent threads) */
#if USE_ARENAS
#if MALLOC_DEBUG > 1
/* Print the complete contents of a single heap to stderr. */
static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
char *ptr;
mchunkptr p;
fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
ptr = (heap->ar_ptr != (mstate)(heap+1)) ?
(char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
~MALLOC_ALIGN_MASK);
for(;;) {
fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
if(p == top(heap->ar_ptr)) {
fprintf(stderr, " (top)\n");
break;
} else if(p->size == (0|PREV_INUSE)) {
fprintf(stderr, " (fence)\n");
break;
}
fprintf(stderr, "\n");
p = next_chunk(p);
}
}
#endif /* MALLOC_DEBUG > 1 */
/* Create a new heap. size is automatically rounded up to a multiple
of the page size. */
static heap_info *
internal_function
#if __STD_C
new_heap(size_t size, size_t top_pad)
#else
new_heap(size, top_pad) size_t size, top_pad;
#endif
{
size_t page_mask = malloc_getpagesize - 1;
char *p1, *p2;
unsigned long ul;
heap_info *h;
if(size+top_pad < HEAP_MIN_SIZE)
size = HEAP_MIN_SIZE;
else if(size+top_pad <= HEAP_MAX_SIZE)
size += top_pad;
else if(size > HEAP_MAX_SIZE)
return 0;
else
size = HEAP_MAX_SIZE;
size = (size + page_mask) & ~page_mask;
/* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
No swap space needs to be reserved for the following large
mapping (on Linux, this is the case for all non-writable mappings
anyway). */
p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p1 != MAP_FAILED) {
p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
ul = p2 - p1;
munmap(p1, ul);
munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
} else {
/* Try to take the chance that an allocation of only HEAP_MAX_SIZE
is already aligned. */
p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
if(p2 == MAP_FAILED)
return 0;
if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
munmap(p2, HEAP_MAX_SIZE);
return 0;
}
}
if(mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
munmap(p2, HEAP_MAX_SIZE);
return 0;
}
h = (heap_info *)p2;
h->size = size;
THREAD_STAT(stat_n_heaps++);
return h;
}
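/* Minimal standalone version of the alignment trick above (Linux/POSIX,
   assuming MAP_ANONYMOUS is available; ALIGN stands in for
   HEAP_MAX_SIZE): map twice the alignment with PROT_NONE, keep the
   aligned middle part and give the rest back. Illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#define ALIGN ((size_t)(1024*1024))     /* must be a power of two */
int
main(void)
{
  char *p1 = mmap(0, ALIGN<<1, PROT_NONE,
                  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if(p1 == MAP_FAILED)
    return 1;
  char *p2 = (char *)(((uintptr_t)p1 + (ALIGN-1)) & ~(ALIGN-1));
  size_t lead = p2 - p1;
  if(lead != 0)                         /* unmap the unaligned head... */
    munmap(p1, lead);
  munmap(p2 + ALIGN, ALIGN - lead);     /* ...and the tail */
  assert(((uintptr_t)p2 & (ALIGN-1)) == 0);
  printf("aligned region of %zu bytes at %p\n", ALIGN, (void *)p2);
  munmap(p2, ALIGN);
  return 0;
}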
/* Grow or shrink a heap. size is automatically rounded up to a
multiple of the page size if it is positive. */
static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
size_t page_mask = malloc_getpagesize - 1;
long new_size;
if(diff >= 0) {
diff = (diff + page_mask) & ~page_mask;
new_size = (long)h->size + diff;
if(new_size > HEAP_MAX_SIZE)
return -1;
if(mprotect((char *)h + h->size, diff, PROT_READ|PROT_WRITE) != 0)
return -2;
} else {
new_size = (long)h->size + diff;
if(new_size < (long)sizeof(*h))
return -1;
/* Try to re-map the extra heap space freshly to save memory, and
make it inaccessible. */
if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
return -2;
/*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
}
h->size = new_size;
return 0;
}
/* Delete a heap. */
#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)
static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
mstate ar_ptr = heap->ar_ptr;
unsigned long pagesz = mp_.pagesize;
mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
heap_info *prev_heap;
long new_size, top_size, extra;
/* Can this heap go away completely? */
while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
prev_heap = heap->prev;
p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
p = prev_chunk(p);
new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
assert(new_size>0 && new_size<(long)(2*MINSIZE));
if(!prev_inuse(p))
new_size += p->prev_size;
assert(new_size>0 && new_size<HEAP_MAX_SIZE);
if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
break;
ar_ptr->system_mem -= heap->size;
arena_mem -= heap->size;
delete_heap(heap);
heap = prev_heap;
if(!prev_inuse(p)) { /* consolidate backward */
p = prev_chunk(p);
unlink(p, bck, fwd);
}
assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
top(ar_ptr) = top_chunk = p;
set_head(top_chunk, new_size | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
}
top_size = chunksize(top_chunk);
extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
if(extra < (long)pagesz)
return 0;
/* Try to shrink. */
if(grow_heap(heap, -extra) != 0)
return 0;
ar_ptr->system_mem -= extra;
arena_mem -= extra;
/* Success. Adjust top accordingly. */
set_head(top_chunk, (top_size - extra) | PREV_INUSE);
/*check_chunk(ar_ptr, top_chunk);*/
return 1;
}
static mstate
internal_function
#if __STD_C
arena_get2(mstate a_tsd, size_t size)
#else
arena_get2(a_tsd, size) mstate a_tsd; size_t size;
#endif
{
mstate a;
int err;
if(!a_tsd)
a = a_tsd = &main_arena;
else {
a = a_tsd->next;
if(!a) {
/* This can only happen while initializing the new arena. */
(void)mutex_lock(&main_arena.mutex);
THREAD_STAT(++(main_arena.stat_lock_wait));
return &main_arena;
}
}
/* Check the global, circularly linked list for available arenas. */
repeat:
do {
if(!mutex_trylock(&a->mutex)) {
THREAD_STAT(++(a->stat_lock_loop));
tsd_setspecific(arena_key, (Void_t *)a);
return a;
}
a = a->next;
} while(a != a_tsd);
/* If not even the list_lock can be obtained, try again. This can
happen during `atfork', or for example on systems where thread
creation makes it temporarily impossible to obtain _any_
locks. */
if(mutex_trylock(&list_lock)) {
a = a_tsd;
goto repeat;
}
(void)mutex_unlock(&list_lock);
/* Nothing immediately available, so generate a new arena. */
a = _int_new_arena(size);
if(!a)
return 0;
tsd_setspecific(arena_key, (Void_t *)a);
mutex_init(&a->mutex);
err = mutex_lock(&a->mutex); /* remember result */
/* Add the new arena to the global list. */
(void)mutex_lock(&list_lock);
a->next = main_arena.next;
main_arena.next = a;
(void)mutex_unlock(&list_lock);
if(err) /* locking failed; keep arena for further attempts later */
return 0;
THREAD_STAT(++(a->stat_lock_loop));
return a;
}
/* Create a new arena with initial size "size". */
mstate
_int_new_arena(size_t size)
{
mstate a;
heap_info *h;
char *ptr;
unsigned long misalign;
h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
mp_.top_pad);
if(!h) {
/* Maybe size is too large to fit in a single heap. So, just try
to create a minimally-sized arena and let _int_malloc() attempt
to deal with the large request via mmap_chunk(). */
h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
if(!h)
return 0;
}
a = h->ar_ptr = (mstate)(h+1);
malloc_init_state(a);
/*a->next = NULL;*/
a->system_mem = a->max_system_mem = h->size;
arena_mem += h->size;
#ifdef NO_THREADS
if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
mp_.max_total_mem)
mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif
/* Set up the top chunk, with proper alignment. */
ptr = (char *)(a + 1);
misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
if (misalign > 0)
ptr += MALLOC_ALIGNMENT - misalign;
top(a) = (mchunkptr)ptr;
set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
return a;
}
#endif /* USE_ARENAS */
/*
* Local variables:
* c-basic-offset: 2
* End:
*/

malloc/hooks.c (new file, 631 lines):

@@ -0,0 +1,631 @@
/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* $Id$ */
#ifndef weak_variable
#define weak_variable /**/
#endif
#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif
/* What to do if the standard debugging hooks are in place and a
corrupt pointer is detected: do nothing (0), print an error message
(1), or call abort() (2). */
/* Hooks for debugging versions. The initial hooks just call the
initialization routine, then do the normal work. */
static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
size_t sz; const __malloc_ptr_t caller;
#endif
{
__malloc_hook = NULL;
ptmalloc_init();
return public_mALLOc(sz);
}
static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
__malloc_hook = NULL;
__realloc_hook = NULL;
ptmalloc_init();
return public_rEALLOc(ptr, sz);
}
static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
__memalign_hook = NULL;
ptmalloc_init();
return public_mEMALIGn(alignment, sz);
}
void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL;
void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
const __malloc_ptr_t)) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
__MALLOC_P ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
__MALLOC_P ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t))
= realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
__MALLOC_P ((size_t __alignment, size_t __size, const __malloc_ptr_t))
= memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;
static int check_action = DEFAULT_CHECK_ACTION;
/* Whether we are using malloc checking. */
static int using_malloc_checking;
/* A flag that is set by malloc_set_state, to signal that malloc checking
must not be enabled on the request from the user (via the MALLOC_CHECK_
environment variable). It is reset by __malloc_check_init to tell
malloc_set_state that the user has requested malloc checking.
The purpose of this flag is to make sure that malloc checking is not
enabled when the heap to be restored was constructed without malloc
checking, and thus does not contain the required magic bytes.
Otherwise the heap would be corrupted by calls to free and realloc. If
it turns out that the heap was created with malloc checking and the
user has requested it malloc_set_state just calls __malloc_check_init
again to enable it. On the other hand, reusing such a heap without
further malloc checking is safe. */
static int disallow_malloc_check;
/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
if (disallow_malloc_check) {
disallow_malloc_check = 0;
return;
}
using_malloc_checking = 1;
__malloc_hook = malloc_check;
__free_hook = free_check;
__realloc_hook = realloc_check;
__memalign_hook = memalign_check;
if(check_action & 1)
fprintf(stderr, "malloc: using debugging hooks\n");
}
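/* Tiny (deliberately buggy) test program for these hooks: run it with
   MALLOC_CHECK_=1 in the environment to get the "free(): invalid
   pointer" diagnostic from free_check() below, or MALLOC_CHECK_=2 to
   abort at the second free. Illustration only. */
#include <stdlib.h>
int
main(void)
{
  char *p = malloc(16);
  free(p);
  free(p);     /* double free -- rejected by mem2chunk_check() */
  return 0;
}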
/* A simple, standard set of debugging hooks. Overhead is `only' one
byte per chunk; still this will catch most cases of double frees or
overruns. The goal here is to avoid obscure crashes due to invalid
usage, unlike in the MALLOC_DEBUG code. */
#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
/* Instrument a chunk with overrun detector byte(s) and convert it
into a user pointer with requested size sz. */
static Void_t*
internal_function
#if __STD_C
mem2mem_check(Void_t *ptr, size_t sz)
#else
mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
#endif
{
mchunkptr p;
unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
size_t i;
if (!ptr)
return ptr;
p = mem2chunk(ptr);
for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
i > sz;
i -= 0xFF) {
if(i-sz < 0x100) {
m_ptr[i] = (unsigned char)(i-sz);
break;
}
m_ptr[i] = 0xFF;
}
m_ptr[sz] = MAGICBYTE(p);
return (Void_t*)m_ptr;
}
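/* Toy re-implementation of the idea (illustration only; the DEMO_* and
   demo_* names are invented): store one byte derived from the block
   address just past the requested size and verify it later, so even a
   one-byte overrun is noticed. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define DEMO_MAGIC(p) ((unsigned char)((((size_t)(p)) >> 3 ^ ((size_t)(p)) >> 11) & 0xFF))
static void *
demo_malloc_checked(size_t sz)
{
  unsigned char *m = malloc(sz + 1);    /* one extra byte for the magic */
  if(m)
    m[sz] = DEMO_MAGIC(m);
  return m;
}
static int
demo_intact(void *mem, size_t sz)
{
  unsigned char *m = mem;
  return m[sz] == DEMO_MAGIC(m);        /* 0 means the guard was clobbered */
}
int
main(void)
{
  char *p = demo_malloc_checked(8);
  if(!p)
    return 1;
  strcpy(p, "1234567");                 /* fits: 7 characters plus NUL */
  printf("intact: %d\n", demo_intact(p, 8));
  p[8] ^= 0xFF;                         /* simulate a one-byte overrun */
  printf("after overrun: %d\n", demo_intact(p, 8));
  free(p);
  return 0;
}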
/* Convert a pointer to be free()d or realloc()ed to a valid chunk
pointer. If the provided pointer is not valid, return NULL. */
static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
mchunkptr p;
INTERNAL_SIZE_T sz, c;
unsigned char magic;
p = mem2chunk(mem);
if(!aligned_OK(p)) return NULL;
if( (char*)p>=mp_.sbrk_base &&
(char*)p<(mp_.sbrk_base+main_arena.system_mem) ) {
/* Must be a chunk in conventional heap memory. */
if(chunk_is_mmapped(p) ||
( (sz = chunksize(p)),
((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) ) ||
sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
(long)prev_chunk(p)<(long)mp_.sbrk_base ||
next_chunk(prev_chunk(p))!=p) ))
return NULL;
magic = MAGICBYTE(p);
for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
((unsigned char*)p)[sz] ^= 0xFF;
} else {
unsigned long offset, page_mask = malloc_getpagesize-1;
/* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
alignment relative to the beginning of a page. Check this
first. */
offset = (unsigned long)mem & page_mask;
if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
offset<0x2000) ||
!chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
return NULL;
magic = MAGICBYTE(p);
for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
((unsigned char*)p)[sz] ^= 0xFF;
}
return p;
}
/* Check for corruption of the top chunk, and try to recover if
necessary. */
static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
mchunkptr t = top(&main_arena);
char* brk, * new_brk;
INTERNAL_SIZE_T front_misalign, sbrk_size;
unsigned long pagesz = malloc_getpagesize;
if((char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem ||
t == initial_top(&main_arena)) return 0;
if(check_action & 1)
fprintf(stderr, "malloc: top chunk is corrupt\n");
if(check_action & 2)
abort();
/* Try to set up a new top chunk. */
brk = MORECORE(0);
front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
if (front_misalign > 0)
front_misalign = MALLOC_ALIGNMENT - front_misalign;
sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
new_brk = (char*)(MORECORE (sbrk_size));
if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
/* Call the `morecore' hook if necessary. */
if (__after_morecore_hook)
(*__after_morecore_hook) ();
main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
top(&main_arena) = (mchunkptr)(brk + front_misalign);
set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
return 0;
}
static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
Void_t *victim;
(void)mutex_lock(&main_arena.mutex);
victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(victim, sz);
}
static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
mchunkptr p;
if(!mem) return;
(void)mutex_lock(&main_arena.mutex);
p = mem2chunk_check(mem);
if(!p) {
(void)mutex_unlock(&main_arena.mutex);
if(check_action & 1)
fprintf(stderr, "free(): invalid pointer %p!\n", mem);
if(check_action & 2)
abort();
return;
}
#if HAVE_MMAP
if (chunk_is_mmapped(p)) {
(void)mutex_unlock(&main_arena.mutex);
munmap_chunk(p);
return;
}
#endif
#if 0 /* Erase freed memory. */
memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
_int_free(&main_arena, mem);
(void)mutex_unlock(&main_arena.mutex);
}
static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
mchunkptr oldp, newp = 0;
INTERNAL_SIZE_T nb, oldsize;
Void_t* newmem = 0;
if (oldmem == 0) return malloc_check(bytes, NULL);
(void)mutex_lock(&main_arena.mutex);
oldp = mem2chunk_check(oldmem);
(void)mutex_unlock(&main_arena.mutex);
if(!oldp) {
if(check_action & 1)
fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
if(check_action & 2)
abort();
return malloc_check(bytes, NULL);
}
oldsize = chunksize(oldp);
checked_request2size(bytes+1, nb);
(void)mutex_lock(&main_arena.mutex);
#if HAVE_MMAP
if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
newp = mremap_chunk(oldp, nb);
if(!newp) {
#endif
/* Note the extra SIZE_SZ overhead. */
if(oldsize - SIZE_SZ >= nb)
newmem = oldmem; /* do nothing */
else {
/* Must alloc, copy, free. */
if (top_check() >= 0)
newmem = _int_malloc(&main_arena, bytes+1);
if (newmem) {
MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
munmap_chunk(oldp);
}
}
#if HAVE_MREMAP
}
#endif
} else {
#endif /* HAVE_MMAP */
if (top_check() >= 0)
newmem = _int_realloc(&main_arena, oldmem, bytes+1);
#if 0 /* Erase freed memory. */
if(newmem)
newp = mem2chunk(newmem);
nb = chunksize(newp);
if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
memset((char*)oldmem + 2*sizeof(mbinptr), 0,
oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
} else if(nb > oldsize+SIZE_SZ) {
memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
0, nb - (oldsize+SIZE_SZ));
}
#endif
#if HAVE_MMAP
}
#endif
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(newmem, bytes);
}
static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
INTERNAL_SIZE_T nb;
Void_t* mem;
if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
if (alignment < MINSIZE) alignment = MINSIZE;
checked_request2size(bytes+1, nb);
(void)mutex_lock(&main_arena.mutex);
mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
NULL;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(mem, bytes);
}
#ifndef NO_THREADS
/* The following hooks are used when the global initialization in
ptmalloc_init() hasn't completed yet. */
static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
Void_t* victim;
victim = _int_malloc(&main_arena, sz);
return victim ? BOUNDED_N(victim, sz) : 0;
}
static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
mchunkptr p;
if(!mem) return;
p = mem2chunk(mem);
#if HAVE_MMAP
if (chunk_is_mmapped(p)) {
munmap_chunk(p);
return;
}
#endif
_int_free(&main_arena, mem);
}
#endif /* NO_THREADS */
/* Get/set state: malloc_get_state() records the current state of all
malloc variables (_except_ for the actual heap contents and `hook'
function pointers) in a system dependent, opaque data structure.
This data structure is dynamically allocated and can be free()d
after use. malloc_set_state() restores the state of all malloc
variables to the previously obtained state. This is especially
useful when using this malloc as part of a shared library, and when
the heap contents are saved/restored via some other method. The
primary example for this is GNU Emacs with its `dumping' procedure.
`Hook' function pointers are never saved or restored by these
functions, with two exceptions: If malloc checking was in use when
malloc_get_state() was called, then malloc_set_state() calls
__malloc_check_init() if possible; if malloc checking was not in
use in the recorded state but the user requested malloc checking,
then the hooks are reset to 0. */
#define MALLOC_STATE_MAGIC 0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 2l) /* major*0x100 + minor */
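/* Worked example of the encoding above (illustrative values): 0x002
   means major 0, minor 2. public_sET_STATe() masks off the minor byte,
   so a state recorded by a newer minor version is still accepted while a
   newer major version is rejected: */
#include <stdio.h>
int
main(void)
{
  long current = 0*0x100l + 2l;         /* MALLOC_STATE_VERSION */
  long newer_minor = 0*0x100l + 3l;
  long newer_major = 1*0x100l + 0l;
  printf("newer minor rejected: %d\n",
         (newer_minor & ~0xffl) > (current & ~0xffl));   /* prints 0 */
  printf("newer major rejected: %d\n",
         (newer_major & ~0xffl) > (current & ~0xffl));   /* prints 1 */
  return 0;
}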
struct malloc_save_state {
long magic;
long version;
mbinptr av[NBINS * 2 + 2];
char* sbrk_base;
int sbrked_mem_bytes;
unsigned long trim_threshold;
unsigned long top_pad;
unsigned int n_mmaps_max;
unsigned long mmap_threshold;
int check_action;
unsigned long max_sbrked_mem;
unsigned long max_total_mem;
unsigned int n_mmaps;
unsigned int max_n_mmaps;
unsigned long mmapped_mem;
unsigned long max_mmapped_mem;
int using_malloc_checking;
};
Void_t*
public_gET_STATe(void)
{
struct malloc_save_state* ms;
int i;
mbinptr b;
ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
if (!ms)
return 0;
(void)mutex_lock(&main_arena.mutex);
malloc_consolidate(&main_arena);
ms->magic = MALLOC_STATE_MAGIC;
ms->version = MALLOC_STATE_VERSION;
ms->av[0] = 0;
ms->av[1] = 0; /* used to be binblocks, now no longer used */
ms->av[2] = top(&main_arena);
ms->av[3] = 0; /* used to be undefined */
for(i=1; i<NBINS; i++) {
b = bin_at(&main_arena, i);
if(first(b) == b)
ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
else {
ms->av[2*i+2] = first(b);
ms->av[2*i+3] = last(b);
}
}
ms->sbrk_base = mp_.sbrk_base;
ms->sbrked_mem_bytes = main_arena.system_mem;
ms->trim_threshold = mp_.trim_threshold;
ms->top_pad = mp_.top_pad;
ms->n_mmaps_max = mp_.n_mmaps_max;
ms->mmap_threshold = mp_.mmap_threshold;
ms->check_action = check_action;
ms->max_sbrked_mem = main_arena.max_system_mem;
#ifdef NO_THREADS
ms->max_total_mem = max_total_mem;
#else
ms->max_total_mem = 0;
#endif
ms->n_mmaps = mp_.n_mmaps;
ms->max_n_mmaps = mp_.max_n_mmaps;
ms->mmapped_mem = mp_.mmapped_mem;
ms->max_mmapped_mem = mp_.max_mmapped_mem;
ms->using_malloc_checking = using_malloc_checking;
(void)mutex_unlock(&main_arena.mutex);
return (Void_t*)ms;
}
int
public_sET_STATe(Void_t* msptr)
{
struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
int i;
mbinptr b;
disallow_malloc_check = 1;
ptmalloc_init();
if(ms->magic != MALLOC_STATE_MAGIC) return -1;
/* Must fail if the major version is too high. */
if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
(void)mutex_lock(&main_arena.mutex);
/* There are no fastchunks. */
clear_fastchunks(&main_arena);
set_max_fast(&main_arena, DEFAULT_MXFAST);
for (i=0; i<NFASTBINS; ++i)
main_arena.fastbins[i] = 0;
for (i=0; i<BINMAPSIZE; ++i)
main_arena.binmap[i] = 0;
top(&main_arena) = ms->av[2];
main_arena.last_remainder = 0;
for(i=1; i<NBINS; i++) {
b = bin_at(&main_arena, i);
if(ms->av[2*i+2] == 0) {
assert(ms->av[2*i+3] == 0);
first(b) = last(b) = b;
} else {
if(i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
largebin_index(chunksize(ms->av[2*i+3]))==i)) {
first(b) = ms->av[2*i+2];
last(b) = ms->av[2*i+3];
/* Make sure the links to the bins within the heap are correct. */
first(b)->bk = b;
last(b)->fd = b;
/* Set bit in binblocks. */
mark_bin(&main_arena, i);
} else {
/* Oops, index computation from chunksize must have changed.
Link the whole list into unsorted_chunks. */
first(b) = last(b) = b;
b = unsorted_chunks(&main_arena);
ms->av[2*i+2]->bk = b;
ms->av[2*i+3]->fd = b->fd;
b->fd->bk = ms->av[2*i+3];
b->fd = ms->av[2*i+2];
}
}
}
mp_.sbrk_base = ms->sbrk_base;
main_arena.system_mem = ms->sbrked_mem_bytes;
mp_.trim_threshold = ms->trim_threshold;
mp_.top_pad = ms->top_pad;
mp_.n_mmaps_max = ms->n_mmaps_max;
mp_.mmap_threshold = ms->mmap_threshold;
check_action = ms->check_action;
main_arena.max_system_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
mp_.max_total_mem = ms->max_total_mem;
#endif
mp_.n_mmaps = ms->n_mmaps;
mp_.max_n_mmaps = ms->max_n_mmaps;
mp_.mmapped_mem = ms->mmapped_mem;
mp_.max_mmapped_mem = ms->max_mmapped_mem;
/* add version-dependent code here */
if (ms->version >= 1) {
/* Check whether it is safe to enable malloc checking, or whether
it is necessary to disable it. */
if (ms->using_malloc_checking && !using_malloc_checking &&
!disallow_malloc_check)
__malloc_check_init ();
else if (!ms->using_malloc_checking && using_malloc_checking) {
__malloc_hook = 0;
__free_hook = 0;
__realloc_hook = 0;
__memalign_hook = 0;
using_malloc_checking = 0;
}
}
check_malloc_state(&main_arena);
(void)mutex_unlock(&main_arena.mutex);
return 0;
}
/*
* Local variables:
* c-basic-offset: 2
* End:
*/

malloc/malloc.c: diff suppressed because it is too large.

malloc/malloc.h:

@@ -20,24 +20,26 @@
#ifndef _MALLOC_H
#define _MALLOC_H 1
#ifdef _LIBC
#include <features.h>
#endif
/*
`ptmalloc', a malloc implementation for multiple threads without
lock contention, by Wolfram Gloger <wmglo@dent.med.uni-muenchen.de>.
See the files `ptmalloc.c' or `COPYRIGHT' for copying conditions.
$Id$
`ptmalloc2', a malloc implementation for multiple threads without
lock contention, by Wolfram Gloger <wg@malloc.de>.
VERSION 2.6.4-pt Wed Dec 4 00:35:54 MET 1996
VERSION 2.7.0
This work is mainly derived from malloc-2.6.4 by Doug Lea
This work is mainly derived from malloc-2.7.0 by Doug Lea
<dl@cs.oswego.edu>, which is available from:
ftp://g.oswego.edu/pub/misc/malloc.c
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
This trimmed-down header file only provides function prototypes and
the exported data structures. For more detailed function
descriptions and compile-time options, see the source file
`ptmalloc.c'.
`malloc.c'.
*/
#if defined(__STDC__) || defined (__cplusplus)
@@ -112,11 +114,6 @@ extern "C" {
#endif
extern int __malloc_initialized;
/* Initialize global configuration. Not needed with GNU libc. */
#ifndef __GLIBC__
extern void ptmalloc_init __MALLOC_P ((void));
#endif
/* Allocate SIZE bytes of memory. */
extern __malloc_ptr_t malloc __MALLOC_P ((size_t __size)) __attribute_malloc__;
@@ -156,16 +153,17 @@ extern __malloc_ptr_t __default_morecore __MALLOC_P ((ptrdiff_t __size))
__attribute_malloc__;
/* SVID2/XPG mallinfo structure */
struct mallinfo {
int arena; /* total space allocated from system */
int ordblks; /* number of non-inuse chunks */
int smblks; /* unused -- always zero */
int arena; /* non-mmapped space allocated from system */
int ordblks; /* number of free chunks */
int smblks; /* number of fastbin blocks */
int hblks; /* number of mmapped regions */
int hblkhd; /* total space in mmapped regions */
int usmblks; /* unused -- always zero */
int fsmblks; /* unused -- always zero */
int hblkhd; /* space in mmapped regions */
int usmblks; /* maximum total allocated space */
int fsmblks; /* space available in freed fastbin blocks */
int uordblks; /* total allocated space */
int fordblks; /* total non-inuse space */
int fordblks; /* total free space */
int keepcost; /* top-most, releasable (via malloc_trim) space */
};
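/* Short usage sketch for the structure above (field meanings as in the
   new comments; illustration only): */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
int
main(void)
{
  void *p = malloc(2000);
  struct mallinfo mi = mallinfo();
  printf("arena    (non-mmapped bytes from system): %d\n", mi.arena);
  printf("ordblks  (number of free chunks):         %d\n", mi.ordblks);
  printf("uordblks (total allocated space):         %d\n", mi.uordblks);
  printf("fordblks (total free space):              %d\n", mi.fordblks);
  printf("keepcost (releasable top space):          %d\n", mi.keepcost);
  free(p);
  return 0;
}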
@@ -174,7 +172,7 @@ extern struct mallinfo mallinfo __MALLOC_P ((void));
/* SVID2/XPG mallopt options */
#ifndef M_MXFAST
# define M_MXFAST 1 /* UNUSED in this malloc */
# define M_MXFAST 1 /* maximum request size for "fastbins" */
#endif
#ifndef M_NLBLKS
# define M_NLBLKS 2 /* UNUSED in this malloc */
@@ -214,7 +212,6 @@ extern __malloc_ptr_t malloc_get_state __MALLOC_P ((void));
malloc_get_state(). */
extern int malloc_set_state __MALLOC_P ((__malloc_ptr_t __ptr));
#if defined __GLIBC__ || defined MALLOC_HOOKS
/* Called once when malloc is initialized; redefining this variable in
the application provides the preferred way to set up the hook
pointers. */
@@ -234,7 +231,19 @@ extern void (*__after_morecore_hook) __MALLOC_PMT ((void));
/* Activate a standard set of debugging hooks. */
extern void __malloc_check_init __MALLOC_P ((void));
#endif
/* Internal routines, operating on "arenas". */
struct malloc_state;
typedef struct malloc_state *mstate;
extern mstate _int_new_arena __MALLOC_P ((size_t __ini_size));
extern __malloc_ptr_t _int_malloc __MALLOC_P ((mstate __m, size_t __size));
extern void _int_free __MALLOC_P ((mstate __m, __malloc_ptr_t __ptr));
extern __malloc_ptr_t _int_realloc __MALLOC_P ((mstate __m,
__malloc_ptr_t __ptr,
size_t __size));
extern __malloc_ptr_t _int_memalign __MALLOC_P ((mstate __m, size_t __alignment,
size_t __size));
#ifdef __cplusplus
}; /* end of extern "C" */

malloc/tst-mallocstate.c (new file, 82 lines):

@@ -0,0 +1,82 @@
/* Copyright (C) 2001 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <errno.h>
#include <stdio.h>
#include "malloc.h"
static int errors = 0;
static void
merror (const char *msg)
{
++errors;
printf ("Error: %s\n", msg);
}
int
main (void)
{
void *p1, *p2;
void *save_state;
long i;
errno = 0;
p1 = malloc (10);
if (p1 == NULL)
merror ("malloc (10) failed.");
p2 = malloc (20);
if (p2 == NULL)
merror ("malloc (20) failed.");
free (malloc (10));
for (i=0; i<100; ++i)
{
save_state = malloc_get_state ();
if (save_state == NULL)
{
merror ("malloc_get_state () failed.");
break;
}
/*free (malloc (10)); This could change the top chunk! */
malloc_set_state (save_state);
p1 = realloc (p1, i*4 + 4);
if (p1 == NULL)
merror ("realloc (i*4) failed.");
free (save_state);
}
p1 = realloc (p1, 40);
free (p2);
p2 = malloc (10);
if (p2 == NULL)
merror ("malloc (10) failed.");
free (p1);
return errors != 0;
}
/*
* Local variables:
* c-basic-offset: 2
* End:
*/

stdlib/msort.c:

@@ -1,9 +1,7 @@
/* An alternative to qsort, with an identical interface.
This file is part of the GNU C Library.
Copyright (C) 1992, 1995-1997, 1999, 2000, 2001, 2002
Free Software Foundation, Inc.
Original Implementation by Mike Haertel, September 1988.
Towers of Hanoi Mergesort by Roger Sayle, January 2002.
Copyright (C) 1992, 1995-1997, 1999, 2000, 2001 Free Software Foundation, Inc.
Written by Mike Haertel, September 1988.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
@@ -21,372 +19,70 @@
02111-1307 USA. */
#include <alloca.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <memcopy.h>
#include <errno.h>
/* Check whether pointer P is aligned for access by type T. */
#define TYPE_ALIGNED(P,T) (((char *) (P) - (char *) 0) % __alignof__ (T) == 0)
static int hanoi_sort (char *b, size_t n, size_t s,
__compar_fn_t cmp, char *t);
static int hanoi_sort_int (int *b, size_t n,
__compar_fn_t cmp, int *t);
#if INT_MAX != LONG_MAX
static int hanoi_sort_long (long int *b, size_t n,
__compar_fn_t cmp, long int *t);
#endif
static void msort_with_tmp (void *b, size_t n, size_t s,
__compar_fn_t cmp, void *t);
/* This routine implements "Towers of Hanoi Mergesort". The algorithm
sorts the n elements of size s pointed to by array b using comparison
function cmp. The argument t points to a suitable temporary buffer.
If the return value is zero, the sorted array is returned in b, and
for non-zero return values the sorted array is returned in t. */
static int
hanoi_sort (char *b, size_t n, size_t s, __compar_fn_t cmp, char *t)
{
size_t n1, n2;
char *b1,*b2;
char *t1,*t2;
char *s1,*s2;
size_t size;
int result;
char *ptr;
if (n <= 1)
return 0;
if (n == 2)
{
b2 = b + s;
if ((*cmp) (b, b2) <= 0)
return 0;
memcpy (__mempcpy (t, b2, s), b, s);
return 1;
}
n1 = n/2;
n2 = n - n1;
/* n1 < n2! */
size = n1 * s;
b1 = b;
b2 = b + size;
t1 = t;
t2 = t + size;
/* Recursively call hanoi_sort to sort the two halves of the array.
Depending upon the return values, determine the values s1 and s2
the locations of the two sorted subarrays, ptr, the location to
contain the sorted array and result, the return value for this
function. Note that "ptr = result? t : b". */
if (hanoi_sort (b1, n1, s, cmp, t1))
{
if (hanoi_sort (b2, n2, s, cmp, t2))
{
result = 0;
ptr = b;
s1 = t1;
s2 = t2;
}
else
{
result = 0;
ptr = b;
s1 = t1;
s2 = b2;
}
}
else
{
if (hanoi_sort (b2, n2, s, cmp, t2))
{
result = 1;
ptr = t;
s1 = b1;
s2 = t2;
}
else
{
result = 1;
ptr = t;
s1 = b1;
s2 = b2;
}
}
/* Merge the two sorted arrays s1 and s2 of n1 and n2 elements
respectively, placing the result in ptr. On entry, n1 > 0
&& n2 > 0, and with each iteration either n1 or n2 is decreased
until either reaches zero, and the loop terminates via return. */
for (;;)
{
if ((*cmp) (s1, s2) <= 0)
{
ptr = (char *) __mempcpy (ptr, s1, s);
s1 += s;
--n1;
if (n1 == 0)
{
if (ptr != s2)
memcpy (ptr, s2, n2 * s);
return result;
}
}
else
{
ptr = (char *) __mempcpy (ptr, s2, s);
s2 += s;
--n2;
if (n2 == 0)
{
memcpy (ptr, s1, n1 * s);
return result;
}
}
}
}
/* This routine is a variant of hanoi_sort that is optimized for the
case where items to be sorted are the size of ints, and both b and
t are suitably aligned. The parameter s is not needed as it is
known to be sizeof(int). */
static int
hanoi_sort_int (int *b, size_t n, __compar_fn_t cmp, int *t)
{
size_t n1, n2;
int *b1,*b2;
int *t1,*t2;
int *s1,*s2;
int result;
int *ptr;
if (n <= 1)
return 0;
if (n == 2)
{
if ((*cmp) (b, b + 1) <= 0)
return 0;
t[0] = b[1];
t[1] = b[0];
return 1;
}
n1 = n/2;
n2 = n - n1;
/* n1 < n2! */
b1 = b;
b2 = b + n1;
t1 = t;
t2 = t + n1;
/* Recursively call hanoi_sort_int to sort the two halves. */
if (hanoi_sort_int (b1, n1, cmp, t1))
{
if (hanoi_sort_int (b2, n2, cmp, t2))
{
result = 0;
ptr = b;
s1 = t1;
s2 = t2;
}
else
{
result = 0;
ptr = b;
s1 = t1;
s2 = b2;
}
}
else
{
if (hanoi_sort_int (b2, n2, cmp, t2))
{
result = 1;
ptr = t;
s1 = b1;
s2 = t2;
}
else
{
result = 1;
ptr = t;
s1 = b1;
s2 = b2;
}
}
/* Merge n1 elements from s1 and n2 elements from s2 into ptr. */
for (;;)
{
if ((*cmp) (s1, s2) <= 0)
{
*ptr++ = *s1++;
--n1;
if (n1 == 0)
{
if (ptr != s2)
memcpy (ptr, s2, n2 * sizeof (int));
return result;
}
}
else
{
*ptr++ = *s2++;
--n2;
if (n2 == 0)
{
memcpy (ptr, s1, n1 * sizeof (int));
return result;
}
}
}
}
#if INT_MAX != LONG_MAX
/* This routine is a variant of hanoi_sort that is optimized for the
case where items to be sorted are the size of longs, and both b and
t are suitably aligned. The parameter s is not needed as it is
known to be sizeof(long). In case sizeof(int)== sizeof(long) we
do not need this code since it would be the same as hanoi_sort_int. */
static int
hanoi_sort_long (long int *b, size_t n, __compar_fn_t cmp, long int *t)
{
size_t n1, n2;
long int *b1,*b2;
long int *t1,*t2;
long int *s1,*s2;
int result;
long int *ptr;
if (n <= 1)
return 0;
if (n == 2)
{
if ((*cmp) (b, b + 1) <= 0)
return 0;
t[0] = b[1];
t[1] = b[0];
return 1;
}
n1 = n/2;
n2 = n - n1;
/* n1 < n2! */
b1 = b;
b2 = b + n1;
t1 = t;
t2 = t + n1;
/* Recursively call hanoi_sort_long to sort the two halves. */
if (hanoi_sort_long (b1, n1, cmp, t1))
{
if (hanoi_sort_long (b2, n2, cmp, t2))
{
result = 0;
ptr = b;
s1 = t1;
s2 = t2;
}
else
{
result = 0;
ptr = b;
s1 = t1;
s2 = b2;
}
}
else
{
if (hanoi_sort_long (b2, n2, cmp, t2))
{
result = 1;
ptr = t;
s1 = b1;
s2 = t2;
}
else
{
result = 1;
ptr = t;
s1 = b1;
s2 = b2;
}
}
/* Merge n1 elements from s1 and n2 elements from s2 into ptr. */
for (;;)
{
if ((*cmp) (s1, s2) <= 0)
{
*ptr++ = *s1++;
--n1;
if (n1 == 0)
{
if (ptr != s2)
memcpy (ptr, s2, n2 * sizeof (long));
return result;
}
}
else
{
*ptr++ = *s2++;
--n2;
if (n2 == 0)
{
memcpy (ptr, s1, n1 * sizeof (long));
return result;
}
}
}
}
#endif
/* This routine preserves the original interface to msort_with_tmp and
determines which variant of hanoi_sort to call, based upon item size
and alignment. */
__compar_fn_t cmp, char *t);
static void
msort_with_tmp (void *b, size_t n, size_t s, __compar_fn_t cmp, void *t)
msort_with_tmp (void *b, size_t n, size_t s, __compar_fn_t cmp,
char *t)
{
const size_t size = n * s;
char *tmp;
char *b1, *b2;
size_t n1, n2;
if (s == sizeof (int) && TYPE_ALIGNED (b, int))
{
if (hanoi_sort_int (b, n, cmp, t))
memcpy (b, t, size);
}
#if INT_MAX != LONG_MAX
else if (s == sizeof (long int) && TYPE_ALIGNED (b, long int))
{
if (hanoi_sort_long (b, n, cmp, t))
memcpy (b, t, size);
}
#endif
if (n <= 1)
return;
n1 = n / 2;
n2 = n - n1;
b1 = b;
b2 = (char *) b + (n1 * s);
msort_with_tmp (b1, n1, s, cmp, t);
msort_with_tmp (b2, n2, s, cmp, t);
tmp = t;
if (s == OPSIZ && (b1 - (char *) 0) % OPSIZ == 0)
/* We are operating on aligned words. Use direct word stores. */
while (n1 > 0 && n2 > 0)
{
if ((*cmp) (b1, b2) <= 0)
{
--n1;
*((op_t *) tmp)++ = *((op_t *) b1)++;
}
else
{
--n2;
*((op_t *) tmp)++ = *((op_t *) b2)++;
}
}
else
{
/* Call the generic implementation. */
if (hanoi_sort (b, n, s, cmp, t))
memcpy (b, t, size);
}
while (n1 > 0 && n2 > 0)
{
if ((*cmp) (b1, b2) <= 0)
{
tmp = (char *) __mempcpy (tmp, b1, s);
b1 += s;
--n1;
}
else
{
tmp = (char *) __mempcpy (tmp, b2, s);
b2 += s;
--n2;
}
}
if (n1 > 0)
memcpy (tmp, b1, n1 * s);
memcpy (b, t, (n - n2) * s);
}
void
@@ -397,7 +93,7 @@ qsort (void *b, size_t n, size_t s, __compar_fn_t cmp)
if (size < 1024)
{
void *buf = __alloca (size);
/* The temporary array is small, so put it on the stack. */
msort_with_tmp (b, n, s, cmp, buf);
}
@@ -434,7 +130,7 @@ qsort (void *b, size_t n, size_t s, __compar_fn_t cmp)
measured in bytes. */
/* If the memory requirements are too high don't allocate memory. */
if ((long int) (size / pagesize) > phys_pages)
if (size / pagesize > phys_pages)
_quicksort (b, n, s, cmp);
else
{

stdlib/qsort.c:

@@ -92,9 +92,6 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
{
register char *base_ptr = (char *) pbase;
/* Allocating SIZE bytes for a pivot buffer facilitates a better
algorithm below since we can do comparisons directly on the pivot. */
char *pivot_buffer = (char *) __alloca (size);
const size_t max_thresh = MAX_THRESH * size;
if (total_elems == 0)
@@ -113,8 +110,6 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
char *left_ptr;
char *right_ptr;
char *pivot = pivot_buffer;
/* Select median value from among LO, MID, and HI. Rearrange
LO and HI so the three values are sorted. This lowers the
probability of picking a pathological pivot value and
@@ -132,8 +127,6 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
if ((*cmp) ((void *) mid, (void *) lo) < 0)
SWAP (mid, lo, size);
jump_over:;
memcpy (pivot, mid, size);
pivot = pivot_buffer;
left_ptr = lo + size;
right_ptr = hi - size;
@@ -143,15 +136,19 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
that this algorithm runs much faster than others. */
do
{
while ((*cmp) ((void *) left_ptr, (void *) pivot) < 0)
while ((*cmp) ((void *) left_ptr, (void *) mid) < 0)
left_ptr += size;
while ((*cmp) ((void *) pivot, (void *) right_ptr) < 0)
while ((*cmp) ((void *) mid, (void *) right_ptr) < 0)
right_ptr -= size;
if (left_ptr < right_ptr)
{
SWAP (left_ptr, right_ptr, size);
if (mid == left_ptr)
mid = right_ptr;
else if (mid == right_ptr)
mid = left_ptr;
left_ptr += size;
right_ptr -= size;
}
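/* Why the separate pivot buffer had to go (see the ChangeLog entry for
   PR libc/2880): ISO C99 lets the comparison function assume that both
   of its arguments point into the array being sorted. The comparator
   below checks exactly that; with the old code, which passed a pointer
   into an alloca()ed pivot copy, the asserts could fire. Illustration
   only, not part of the patch. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
static int vals[8] = { 7, 3, 5, 1, 8, 2, 6, 4 };
static int
cmp_int_checked(const void *a, const void *b)
{
  assert((const int *)a >= vals && (const int *)a < vals + 8);
  assert((const int *)b >= vals && (const int *)b < vals + 8);
  return (*(const int *)a > *(const int *)b)
         - (*(const int *)a < *(const int *)b);
}
int
main(void)
{
  qsort(vals, 8, sizeof vals[0], cmp_int_checked);
  for(int i = 0; i < 8; i++)
    printf("%d ", vals[i]);
  putchar('\n');
  return 0;
}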