This commit was generated by cvs2svn to compensate for changes in r30327,
which included commits to RCS files with non-trunk default branches.

From-SVN: r30328
This commit is contained in:
Tom Tromey 1999-11-01 20:48:52 +00:00
parent fd6a6309db
commit 85f29b3bb5
10 changed files with 428 additions and 77 deletions

View File

@ -41,7 +41,7 @@
* This interface is fairly big, largely for performance reasons.
* The most basic constants and functions:
*
* CORD - the type fo a cord;
* CORD - the type of a cord;
* CORD_EMPTY - empty cord;
* CORD_len(cord) - length of a cord;
* CORD_cat(cord1,cord2) - concatenation of two cords;
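For orientation, a minimal usage sketch of the basic operations listed above (illustrative only, not part of this change; it assumes CORD_from_char_star as declared elsewhere in cord.h, and that a plain C string is itself a valid CORD):

#include <stddef.h>
#include "cord.h"

/* Build a cord from two pieces and measure it. */
size_t greet_len(void)
{
    CORD greeting = CORD_cat(CORD_from_char_star("hello, "), "world");
    return CORD_len(greeting);   /* 12 */
}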

View File

@ -582,7 +582,7 @@ CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
state -> lf_cache[i] = 0;
}
state -> lf_current = 0;
GC_register_finalizer(state, CORD_lf_close_proc, 0, 0, 0);
GC_REGISTER_FINALIZER(state, CORD_lf_close_proc, 0, 0, 0);
return(CORD_from_fn(CORD_lf_func, state, len));
}
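The switch from GC_register_finalizer to the GC_REGISTER_FINALIZER macro lets debug builds route registration through the debugging variant. A hedged sketch of the same pattern in client code (struct my_handle, my_close and open_handle are made-up names, not part of the diff):

#include <stdio.h>
#include "gc.h"

struct my_handle { FILE *f; };

/* Finalizer: close the FILE held by a collected object. */
static void my_close(GC_PTR obj, GC_PTR client_data)
{
    fclose(((struct my_handle *)obj)->f);
}

struct my_handle *open_handle(const char *path)
{
    struct my_handle *h = (struct my_handle *)GC_MALLOC(sizeof *h);
    h->f = fopen(path, "r");
    /* Under GC_DEBUG this macro typically resolves to the debug        */
    /* registration routine; otherwise it is plain GC_register_finalizer. */
    GC_REGISTER_FINALIZER(h, my_close, 0, 0, 0);
    return h;
}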

View File

@ -58,9 +58,11 @@
# if defined(__STDC__) || defined(__cplusplus)
# define GC_PROTO(args) args
typedef void * GC_PTR;
# define GC_CONST const
# else
# define GC_PROTO(args) ()
typedef char * GC_PTR;
# define GC_CONST
# endif
# ifdef __cplusplus
@ -96,11 +98,31 @@ GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* pointer to a previously allocated heap */
/* object. */
GC_API int GC_find_leak;
/* Do not actually garbage collect, but simply */
/* report inaccessible memory that was not */
/* deallocated with GC_free. Initial value */
/* is determined by FIND_LEAK macro. */
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
GC_API int GC_finalize_on_demand;
/* If nonzero, finalizers will only be run in */
/* response to an explicit GC_invoke_finalizers */
/* call. The default is determined by whether */
/* the FINALIZE_ON_DEMAND macro is defined */
/* when the collector is built. */
GC_API int GC_java_finalization;
/* Mark objects reachable from finalizable */
/* objects in a separate postpass. This makes */
/* it a bit safer to use non-topologically- */
/* ordered finalization. Default value is */
/* determined by JAVA_FINALIZATION macro. */
GC_API int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
/* because it's not safe. */
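The new GC_finalize_on_demand flag is a plain global; a small hedged sketch of using the on-demand mode together with GC_invoke_finalizers (declared further down in this header); the function is purely illustrative:

#include "gc.h"

int run_deferred_finalizers_example(void)
{
    int n;

    GC_finalize_on_demand = 1;      /* don't run finalizers inside allocation */
    /* ... allocate objects and register finalizers ... */
    GC_gcollect();                  /* detect unreachable finalizable objects */
    n = GC_invoke_finalizers();     /* run whatever was queued, on our stack  */
    return n;                       /* number of finalizers that were run     */
}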
@ -111,6 +133,12 @@ GC_API int GC_dont_expand;
GC_API int GC_full_freq; /* Number of partial collections between */
/* full collections. Matters only if */
/* GC_incremental is set. */
/* Full collections are also triggered if */
/* the collector detects a substantial */
/* increase in the number of in-use heap */
/* blocks. Values in the tens are now */
/* perfectly reasonable, unlike for */
/* earlier GC versions. */
GC_API GC_word GC_non_gc_bytes;
/* Bytes not considered candidates for collection. */
@ -277,6 +305,9 @@ GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
/* Includes some pages that were allocated but never written. */
GC_API size_t GC_get_heap_size GC_PROTO((void));
/* Return a lower bound on the number of free bytes in the heap. */
GC_API size_t GC_get_free_bytes GC_PROTO((void));
/* Return the number of bytes allocated since the last collection. */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
@ -321,10 +352,11 @@ GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * descr_string,
int descr_int
#else
# define GC_EXTRAS __FILE__, __LINE__
# define GC_EXTRA_PARAMS char * descr_string, int descr_int
# define GC_EXTRA_PARAMS GC_CONST char * descr_string, int descr_int
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
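GC_EXTRAS and GC_EXTRA_PARAMS exist so the debugging allocation entry points can record the call site; a sketch of how they are typically wired together (this mirrors the GC_DEBUG convention in this header, but treat it as illustrative rather than a quote of the header):

/* Debug allocator declaration using the parameter-list macro: */
GC_API GC_PTR GC_debug_malloc GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));

/* The client-visible macro passes the caller's file and line (and,    */
/* with GC_ADD_CALLER, the return address) via GC_EXTRAS:              */
#ifdef GC_DEBUG
#   define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
#else
#   define GC_MALLOC(sz) GC_malloc(sz)
#endif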
@ -510,7 +542,7 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
/* FINALIZE_ON_DEMAND is defined, it must be called */
/* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
@ -668,7 +700,7 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
#if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@ -687,11 +719,12 @@ GC_API void (*GC_is_visible_print_proc)
# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
defined(IRIX_JDK_THREADS)
defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
/* Any flavor of threads except SRC_M3. */
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
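A brief usage sketch for GC_malloc_many and GC_NEXT (illustrative, not part of the change): grab a batch of equal-sized objects under one lock acquisition and unlink them one at a time through their first word.

void consume_batch_example(void)
{
    GC_PTR batch = GC_malloc_many(32);   /* list of 32-byte objects */
    GC_PTR p;

    while (batch != 0) {
        p = batch;
        batch = GC_NEXT(p);
        GC_NEXT(p) = 0;                  /* clear the link before use */
        /* ... hand p to the application ... */
    }
}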

View File

@ -43,11 +43,21 @@
# define OPENBSD
# define mach_type_known
# endif
# if defined(__OpenBSD__) && defined(__sparc__)
# define SPARC
# define OPENBSD
# define mach_type_known
# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
# define mach_type_known
# endif
# if defined(__NetBSD__) && defined(arm32)
# define ARM32
# define NETBSD
# define mach_type_known
# endif
# if defined(vax)
# define VAX
# ifdef ultrix
@ -100,7 +110,8 @@
# endif
# define mach_type_known
# endif
# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
&& !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@ -124,15 +135,22 @@
# define SYSV
# define mach_type_known
# endif
# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) \
# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
|| defined(hppa) || defined(__hppa__)
# define HP_PA
# ifndef LINUX
# define HPUX
# endif
# define mach_type_known
# endif
# if defined(LINUX) && defined(i386)
# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
# if defined(LINUX) && (defined(__ia64__) || defined(__ia64))
# define IA64
# define mach_type_known
# endif
# if defined(LINUX) && defined(powerpc)
# define POWERPC
# define mach_type_known
@ -141,9 +159,8 @@
# define M68K
# define mach_type_known
# endif
# if defined(linux) && defined(sparc)
# if defined(LINUX) && defined(sparc)
# define SPARC
# define LINUX
# define mach_type_known
# endif
# if defined(__alpha) || defined(__alpha__)
@ -153,9 +170,11 @@
# endif
# define mach_type_known
# endif
# if defined(_AMIGA)
# define M68K
# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
# endif
# ifdef AMIGA
# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@ -168,6 +187,11 @@
# define MACOS
# define mach_type_known
# endif
# if defined(macosx)
# define MACOSX
# define POWERPC
# define mach_type_known
# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@ -241,6 +265,10 @@
# define UTS4
# define mach_type_known
# endif
# if defined(__pj__)
# define PJ
# define mach_type_known
# endif
/* Ivan Demakov */
# if defined(__WATCOMC__) && defined(__386__)
# define I386
@ -486,8 +514,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
# define ALIGNMENT 2
# ifdef MACOS
# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@ -497,14 +525,29 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
# define ALIGNMENT 4 /* Guess. Can someone verify? */
/* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
/* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
/* Others have reported better success with */
/* extern int __data_start; */
/*# define DATASTART (&__data_start) */
/* and disabling the GC_data_start */
/* initialization code. */
extern int _end;
# define DATAEND (&_end)
# endif
# ifdef MACOSX
# define ALIGNMENT 4
# define OS_TYPE "MACOSX"
# define DATASTART ((ptr_t) get_etext())
# define STACKBOTTOM ((ptr_t) 0xc0000000)
# define DATAEND /* not needed */
# endif
# endif
# ifdef VAX
@ -603,6 +646,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
# ifdef OPENBSD
# define OS_TYPE "OPENBSD"
# define STACKBOTTOM ((ptr_t) 0xf8000000)
# define DATASTART ((ptr_t)(&etext))
# endif
# endif
# ifdef I386
@ -657,10 +705,13 @@
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define STACKBOTTOM ((ptr_t)0xc0000000)
/* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
/* Probably needs to be more flexible, but I don't yet */
/* fully understand how flexible. */
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
/* STACKBOTTOM is usually 0xc0000000, but this changes with */
/* different kernel configurations. In particular, systems */
/* with 2GB physical memory will usually move the user */
/* address space limit, and hence initial SP to 0x80000000. */
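For context, HEURISTIC1 with STACK_GRAN amounts to rounding an address near the top of the main stack up to the next STACK_GRAN boundary; a rough sketch of the idea (not the collector's actual code, which lives in os_dep.c):

/* Assumes a downward-growing stack and a power-of-two STACK_GRAN. */
ptr_t stack_base_guess(void)
{
    word dummy;
    word result = (word)(&dummy);

    result += STACK_GRAN - 1;
    result &= ~((word)STACK_GRAN - 1);   /* round up to the boundary */
    return (ptr_t)result;
}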
# if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC)
# define MPROTECT_VDB
# else
@ -862,9 +913,17 @@
# endif
# ifdef HP_PA
/* OS is assumed to be HP/UX */
# define MACH_TYPE "HP_PA"
# define ALIGNMENT 4
# define ALIGN_DOUBLE
# define OS_TYPE "HPUX"
# ifdef __LP64__
# define CPP_WORDSZ 64
# define ALIGNMENT 8
# else
# define CPP_WORDSZ 32
# define ALIGNMENT 4
# define ALIGN_DOUBLE
# endif
extern int __data_start;
# define DATASTART ((ptr_t)(&__data_start))
# if 0
@ -881,6 +940,9 @@
# endif
# define STACK_GROWS_UP
# define DYNAMIC_LOADING
# ifndef HPUX_THREADS
# define MPROTECT_VDB
# endif
# include <unistd.h>
# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
/* They misspelled the Posix macro? */
@ -909,9 +971,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
# if 0
/* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
# define DYNAMIC_LOADING
# endif
# define DATASTART GC_data_start
# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@ -923,6 +989,31 @@
# endif
# endif
# ifdef IA64
# define MACH_TYPE "IA64"
# define ALIGN_DOUBLE
/* Requires 16 byte alignment for malloc */
# define ALIGNMENT 8
# ifdef HPUX
--> needs work
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define CPP_WORDSZ 64
/* This should really be done through /proc, but that */
/* requires we run on an IA64 kernel. */
# define STACKBOTTOM ((ptr_t) 0xa000000000000000l)
/* We also need the base address of the register stack */
/* backing store. There is probably a better way to */
/* get that, too ... */
# define BACKING_STORE_BASE ((ptr_t) 0x9fffffff80000000l)
# define DATASTART GC_data_start
# define DYNAMIC_LOADING
extern int _end;
# define DATAEND (&_end)
# endif
# endif
# ifdef M88K
# define MACH_TYPE "M88K"
# define ALIGNMENT 4
@ -953,6 +1044,26 @@
# define HEURISTIC2
# endif
# if defined(PJ)
# define ALIGNMENT 4
extern int _etext;
# define DATASTART ((ptr_t)(&_etext))
# define HEURISTIC1
# endif
# ifdef ARM32
# define CPP_WORDSZ 32
# define MACH_TYPE "ARM32"
# define ALIGNMENT 4
# ifdef NETBSD
# define OS_TYPE "NETBSD"
# define HEURISTIC2
extern char etext;
# define DATASTART ((ptr_t)(&etext))
# define USE_GENERIC_PUSH_REGS
# endif
#endif
# ifndef STACK_GROWS_UP
# define STACK_GROWS_DOWN
# endif
@ -995,6 +1106,10 @@
# define SUNOS5SIGS
# endif
# if defined(HPUX)
# define SUNOS5SIGS
# endif
# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
-> bad word size
# endif
@ -1021,6 +1136,10 @@
# undef MPROTECT_VDB
# endif
# ifdef USE_MUNMAP
# undef MPROTECT_VDB /* Can't deal with address space holes. */
# endif
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif
@ -1040,10 +1159,13 @@
# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
--> inconsistent configuration
# endif
# if defined(HPUX_THREADS) && !defined(HPUX)
--> inconsistent configuration
# endif
# if defined(PCR) || defined(SRC_M3) || \
defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
defined(IRIX_JDK_THREADS)
defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
# define THREADS
# endif

View File

@ -58,9 +58,11 @@
# if defined(__STDC__) || defined(__cplusplus)
# define GC_PROTO(args) args
typedef void * GC_PTR;
# define GC_CONST const
# else
# define GC_PROTO(args) ()
typedef char * GC_PTR;
# define GC_CONST
# endif
# ifdef __cplusplus
@ -96,11 +98,31 @@ GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
/* pointer to a previously allocated heap */
/* object. */
GC_API int GC_find_leak;
/* Do not actually garbage collect, but simply */
/* report inaccessible memory that was not */
/* deallocated with GC_free. Initial value */
/* is determined by FIND_LEAK macro. */
GC_API int GC_quiet; /* Disable statistics output. Only matters if */
/* collector has been compiled with statistics */
/* enabled. This involves a performance cost, */
/* and is thus not the default. */
GC_API int GC_finalize_on_demand;
/* If nonzero, finalizers will only be run in */
/* response to an explicit GC_invoke_finalizers */
/* call. The default is determined by whether */
/* the FINALIZE_ON_DEMAND macro is defined */
/* when the collector is built. */
GC_API int GC_java_finalization;
/* Mark objects reachable from finalizable */
/* objects in a separate postpass. This makes */
/* it a bit safer to use non-topologically- */
/* ordered finalization. Default value is */
/* determined by JAVA_FINALIZATION macro. */
GC_API int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
/* because it's not safe. */
@ -111,6 +133,12 @@ GC_API int GC_dont_expand;
GC_API int GC_full_freq; /* Number of partial collections between */
/* full collections. Matters only if */
/* GC_incremental is set. */
/* Full collections are also triggered if */
/* the collector detects a substantial */
/* increase in the number of in-use heap */
/* blocks. Values in the tens are now */
/* perfectly reasonable, unlike for */
/* earlier GC versions. */
GC_API GC_word GC_non_gc_bytes;
/* Bytes not considered candidates for collection. */
@ -277,6 +305,9 @@ GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
/* Includes some pages that were allocated but never written. */
GC_API size_t GC_get_heap_size GC_PROTO((void));
/* Return a lower bound on the number of free bytes in the heap. */
GC_API size_t GC_get_free_bytes GC_PROTO((void));
/* Return the number of bytes allocated since the last collection. */
GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
@ -321,10 +352,11 @@ GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_word ra, char * descr_string, int descr_int
# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * descr_string,
int descr_int
#else
# define GC_EXTRAS __FILE__, __LINE__
# define GC_EXTRA_PARAMS char * descr_string, int descr_int
# define GC_EXTRA_PARAMS GC_CONST char * descr_string, int descr_int
#endif
/* Debugging (annotated) allocation. GC_gcollect will check */
@ -510,7 +542,7 @@ GC_API int GC_invoke_finalizers GC_PROTO((void));
/* be finalized. Return the number of finalizers */
/* that were run. Normally this is also called */
/* implicitly during some allocations. If */
/* FINALIZE_ON_DEMAND is defined, it must be called */
/* GC_finalize_on_demand is nonzero, it must be called */
/* explicitly. */
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
@ -668,7 +700,7 @@ GC_API void (*GC_is_visible_print_proc)
# endif /* SOLARIS_THREADS */
#if defined(IRIX_THREADS) || defined(LINUX_THREADS)
#if defined(IRIX_THREADS) || defined(LINUX_THREADS) || defined(HPUX_THREADS)
/* We treat these similarly. */
# include <pthread.h>
# include <signal.h>
@ -687,11 +719,12 @@ GC_API void (*GC_is_visible_print_proc)
# if defined(PCR) || defined(SOLARIS_THREADS) || defined(WIN32_THREADS) || \
defined(IRIX_THREADS) || defined(LINUX_THREADS) || \
defined(IRIX_JDK_THREADS)
defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
/* Any flavor of threads except SRC_M3. */
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
/* lb must be large enough to hold the pointer field. */
GC_PTR GC_malloc_many(size_t lb);
#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */

View File

@ -13,7 +13,7 @@
//
// This is a C++ header file that is intended to replace the SGI STL
// alloc.h.
// alloc.h. This assumes SGI STL version < 3.0.
//
// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
// and -DALL_INTERIOR_POINTERS. We also recommend

View File

@ -318,12 +318,10 @@ class traceable_alloc_template {
typedef traceable_alloc_template < 0 > traceable_alloc;
#ifdef _SGI_SOURCE
// We want to specialize simple_alloc so that it does the right thing
// for all pointerfree types. At the moment there is no portable way to
// even approximate that. The following approximation should work for
// SGI compilers, and perhaps some others.
// SGI compilers, and recent versions of g++.
# define __GC_SPECIALIZE(T,alloc) \
class simple_alloc<T, alloc> { \
@ -451,6 +449,4 @@ __STL_END_NAMESPACE
#endif /* __STL_USE_STD_ALLOCATORS */
#endif /* _SGI_SOURCE */
#endif /* GC_ALLOC_H */

View File

@ -49,14 +49,16 @@ typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
* 0 means we're not responsible for this block.
* 0 means we're not responsible for this block,
* or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
/* ascending order. */
/* ascending order... */
struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
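In other words, a header lookup consults the bottom-level index and must distinguish the three kinds of entries described above; a hedged sketch of that interpretation (get_bottom_entry is a made-up accessor for illustration; the real code uses the HDR/GET_HDR machinery):

hdr *find_header_sketch(char *p)
{
    hdr *entry = get_bottom_entry(p);          /* hypothetical accessor */

    while (entry != 0 && (unsigned long)entry <= MAX_JUMP) {
        /* Forwarding value: the block begins at least                  */
        /* (long)entry * HBLKSIZE bytes below p, so step back and retry. */
        p -= (long)entry * HBLKSIZE;
        entry = get_bottom_entry(p);
    }
    return entry;    /* 0 = not a GC block; otherwise a real hdr *      */
}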

View File

@ -1,6 +1,9 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
*
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@ -64,16 +67,16 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# include <stddef.h>
# endif
# define VOLATILE volatile
# define CONST const
#else
# ifdef MSWIN32
# include <stdlib.h>
# endif
# define VOLATILE
# define CONST
#endif
#ifdef AMIGA
#define CONST GC_CONST
#if 0 /* was once defined for AMIGA */
# define GC_FAR __far
#else
# define GC_FAR
@ -350,7 +353,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+ GC_page_size) \
+ GC_page_size-1)
# else
# if defined(AMIGA) || defined(NEXT) || defined(DOS4GW)
# if defined(AMIGA) || defined(NEXT) || defined(MACOSX) || defined(DOS4GW)
# define GET_MEM(bytes) HBLKPTR((size_t) \
calloc(1, (size_t)bytes + GC_page_size) \
+ GC_page_size-1)
@ -436,7 +439,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# endif
# ifdef LINUX_THREADS
# include <pthread.h>
# ifdef __i386__
# if defined(I386)
inline static int GC_test_and_set(volatile unsigned int *addr) {
int oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
@ -446,9 +449,57 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
return oldval;
}
# else
--> Need implementation of GC_test_and_set()
# if defined(POWERPC)
inline static int GC_test_and_set(volatile unsigned int *addr) {
int oldval;
int temp = 1; // locked value
__asm__ __volatile__(
"1:\tlwarx %0,0,%3\n" // load and reserve
"\tcmpwi %0, 0\n" // if load is
"\tbne 2f\n" // non-zero, return already set
"\tstwcx. %2,0,%1\n" // else store conditional
"\tbne- 1b\n" // retry if lost reservation
"2:\t\n" // oldval is zero if we set
: "=&r"(oldval), "=p"(addr)
: "r"(temp), "1"(addr)
: "memory");
return (int)oldval;
}
# else
# ifdef ALPHA
inline static int GC_test_and_set(volatile unsigned int *
addr)
{
unsigned long oldvalue;
unsigned long temp;
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
" and %0,%3,%2\n"
" bne %2,2f\n"
" xor %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,3f\n"
" mb\n"
"2:\n"
".section .text2,\"ax\"\n"
"3: br 1b\n"
".previous"
:"=&r" (temp), "=m" (*addr), "=&r"
(oldvalue)
:"Ir" (1), "m" (*addr));
return oldvalue;
}
# else
--> Need implementation of GC_test_and_set()
# endif
# endif
# endif
# define GC_clear(addr) (*(addr) = 0)
inline static void GC_clear(volatile unsigned int *addr) {
*(addr) = 0;
}
extern volatile unsigned int GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@ -462,15 +513,10 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
# ifdef UNDEFINED
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# else
# define LOCK() \
# define LOCK() \
{ if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
# define UNLOCK() \
# define UNLOCK() \
GC_clear(&GC_allocate_lock)
# endif
extern GC_bool GC_collecting;
# define ENTER_GC() \
{ \
@ -478,15 +524,30 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
}
# define EXIT_GC() GC_collecting = 0;
# endif /* LINUX_THREADS */
# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
# if defined(HPUX_THREADS)
# include <pthread.h>
# include <mutex.h>
extern pthread_mutex_t GC_allocate_ml;
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# endif
# if defined(IRIX_THREADS) || defined(IRIX_JDK_THREADS)
/* This may also eventually be appropriate for HPUX_THREADS */
# include <pthread.h>
# ifndef HPUX_THREADS
/* This probably should never be included, but I can't test */
/* on Irix anymore. */
# include <mutex.h>
# endif
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
# ifndef HPUX_THREADS
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
|| !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
# define GC_test_and_set(addr, v) test_and_set(addr,v)
# else
# else
# define GC_test_and_set(addr, v) __test_and_set(addr,v)
# endif
# else
/* I couldn't find a way to do this inline on HP/UX */
# endif
extern unsigned long GC_allocate_lock;
/* This is not a mutex because mutexes that obey the (optional) */
@ -500,15 +561,17 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# define NO_THREAD (pthread_t)(-1)
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
# ifdef UNDEFINED
# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
# ifdef HPUX_THREADS
# define LOCK() { if (!GC_test_and_clear(&GC_allocate_lock)) GC_lock(); }
/* The following is INCORRECT, since the memory model is too weak. */
# define UNLOCK() { GC_noop1(&GC_allocate_lock); \
*(volatile unsigned long *)(&GC_allocate_lock) = 1; }
# else
# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
# define LOCK() { if (GC_test_and_set(&GC_allocate_lock, 1)) GC_lock(); }
# if __mips >= 3 && (defined (_ABIN32) || defined(_ABI64)) \
&& defined(_COMPILER_VERSION) && _COMPILER_VERSION >= 700
# define UNLOCK() __lock_release(&GC_allocate_lock)
# else
# else
/* The function call in the following should prevent the */
/* compiler from moving assignments to below the UNLOCK. */
/* This is probably not necessary for ucode or gcc 2.8. */
@ -516,7 +579,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
/* versions. */
# define UNLOCK() { GC_noop1(&GC_allocate_lock); \
*(volatile unsigned long *)(&GC_allocate_lock) = 0; }
# endif
# endif
# endif
extern GC_bool GC_collecting;
# define ENTER_GC() \
@ -607,7 +670,7 @@ void GC_print_callers (/* struct callinfo info[NFRAMES] */);
# else
# if defined(SOLARIS_THREADS) || defined(WIN32_THREADS) \
|| defined(IRIX_THREADS) || defined(LINUX_THREADS) \
|| defined(IRIX_JDK_THREADS)
|| defined(IRIX_JDK_THREADS) || defined(HPUX_THREADS)
void GC_stop_world();
void GC_start_world();
# define STOP_WORLD() GC_stop_world()
@ -823,6 +886,7 @@ struct hblkhdr {
struct hblk * hb_next; /* Link field for hblk free list */
/* and for lists of chunks waiting to be */
/* reclaimed. */
struct hblk * hb_prev; /* Backwards link for free list. */
word hb_descr; /* object descriptor for marking. See */
/* mark.h. */
char* hb_map; /* A pointer to a pointer validity map of the block. */
@ -837,14 +901,28 @@ struct hblkhdr {
# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
/* point to the first page of */
/* this object. */
# define WAS_UNMAPPED 2 /* This is a free block, which has */
/* been unmapped from the address */
/* space. */
/* GC_remap must be invoked on it */
/* before it can be reallocated. */
/* Only set with USE_MUNMAP. */
unsigned short hb_last_reclaimed;
/* Value of GC_gc_no when block was */
/* last allocated or swept. May wrap. */
/* For a free block, this is maintained */
/* only for USE_MUNMAP, and indicates */
/* when the header was allocated, or */
/* when the size of the block last */
/* changed. */
word hb_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
/* INCLUDED) in the heap block. */
/* The lsb of word 0 is numbered 0. */
/* Unused bits are invalid, and are */
/* occasionally set, e.g for uncollectable */
/* objects. */
};
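The hb_marks comment implies the usual word/bit decomposition; a hedged sketch of reading the mark bit for the object starting at word n of a block (WORDSZ is the word size in bits; the collector's own macro differs in detail):

int mark_bit_sketch(struct hblkhdr *hhdr, int n)
{
    /* Bit n, counting from the lsb of hb_marks[0]. */
    return (int)((hhdr->hb_marks[n / WORDSZ] >> (n % WORDSZ)) & 1);
}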
/* heap block body */
@ -959,6 +1037,9 @@ struct _GC_arrays {
word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
word _large_free_bytes;
/* Total bytes contained in blocks on large object free */
/* list. */
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
@ -1005,6 +1086,9 @@ struct _GC_arrays {
/* Number of words in accessible atomic */
/* objects. */
# endif
# ifdef USE_MUNMAP
word _unmapped_bytes;
# endif
# ifdef MERGE_SIZES
unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
/* Number of words to allocate for a given allocation request in */
@ -1022,7 +1106,7 @@ struct _GC_arrays {
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
/* (If ALL_INTERIOR_POINTERS is defined, then */
/* instead ((short *)(hbh_map[sz])[i] is j if */
/* instead ((short *)(hb_map[sz])[i] is j if */
/* block_start+WORDS_TO_BYTES(i) is in the */
/* interior of an object starting at */
/* block_start+WORDS_TO_BYTES(i-j)). */
@ -1135,6 +1219,7 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
@ -1144,6 +1229,9 @@ GC_API GC_FAR struct _GC_arrays GC_arrays;
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
# ifdef USE_MUNMAP
# define GC_unmapped_bytes GC_arrays._unmapped_bytes
# endif
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@ -1236,7 +1324,7 @@ extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
extern struct hblk * GC_hblkfreelist;
extern struct hblk * GC_hblkfreelist[];
/* List of completely empty heap blocks */
/* Linked through hb_next field of */
/* header structure associated with */
@ -1311,7 +1399,12 @@ GC_bool GC_should_collect();
void GC_apply_to_all_blocks(/*fn, client_data*/);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
struct hblk * GC_next_block(/* struct hblk * h */);
struct hblk * GC_next_used_block(/* struct hblk * h */);
/* Return first in-use block >= h */
struct hblk * GC_prev_block(/* struct hblk * h */);
/* Return last block <= h. Returned block */
/* is managed by GC, but may or may not be in */
/* use. */
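As an illustration of the renamed block iterators, a hypothetical lookup might read as follows (sketch only; the collector's own traversals go through GC_apply_to_all_blocks):

void block_lookup_sketch(struct hblk *start)
{
    struct hblk *h    = GC_next_used_block(start); /* first in-use block >= start */
    struct hblk *prev = GC_prev_block(start);      /* last managed block <= start, */
                                                   /* in use or not                */
    /* ... examine their headers ... */
}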
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
void GC_invalidate_mark_state(); /* Tell the marker that marked */
@ -1384,8 +1477,14 @@ extern void (*GC_start_call_back)(/* void */);
/* lock held. */
/* 0 by default. */
void GC_push_regs(); /* Push register contents onto mark stack. */
/* If NURSERY is defined, the default push */
/* action can be overridden with GC_push_proc */
void GC_remark(); /* Mark from all marked objects. Used */
/* only if we had to drop something. */
# ifdef NURSERY
extern void (*GC_push_proc)(ptr_t);
# endif
# if defined(MSWIN32)
void __cdecl GC_push_one();
# else
@ -1608,6 +1707,15 @@ extern void (*GC_print_heap_obj)(/* ptr_t p */);
/* detailed description of the object */
/* referred to by p. */
/* Memory unmapping: */
#ifdef USE_MUNMAP
void GC_unmap_old(void);
void GC_merge_unmapped(void);
void GC_unmap(ptr_t start, word bytes);
void GC_remap(ptr_t start, word bytes);
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
#endif
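These entry points implement unmapping of long-unused heap blocks; a rough sketch of the underlying idea on a POSIX system (an assumption for illustration only; the real implementation is in os_dep.c and also merges and splits mapped ranges):

#include <sys/mman.h>

/* Give the pages back to the OS but keep the address range reserved,   */
/* so a later remap can make it usable again.  MAP_ANON is spelled      */
/* MAP_ANONYMOUS on some systems.                                       */
void unmap_sketch(ptr_t start, word bytes)
{
    (void)mmap(start, bytes, PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
}

void remap_sketch(ptr_t start, word bytes)
{
    (void)mprotect(start, bytes, PROT_READ | PROT_WRITE);
}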
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
void GC_read_dirty(); /* Retrieve dirty bits. */
@ -1640,6 +1748,16 @@ void GC_print_heap_sects();
void GC_print_static_roots();
void GC_dump();
#ifdef KEEP_BACK_PTRS
void GC_store_back_pointer(ptr_t source, ptr_t dest);
void GC_marked_for_finalization(ptr_t dest);
# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
#else
# define GC_STORE_BACK_PTR(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest)
#endif
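The back-pointer hooks are meant to be sprinkled through the marker; a hedged sketch of the intended call pattern (mark_one_field is a made-up routine, not the collector's):

void mark_one_field(ptr_t source, ptr_t target)
{
    /* Record where the reference came from; this compiles away unless  */
    /* KEEP_BACK_PTRS is defined.                                        */
    GC_STORE_BACK_PTR(source, target);
    /* ... push target onto the mark stack ... */
}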
/* Make arguments appear live to compiler */
# ifdef __WATCOMC__
void GC_noop(void*, ...);
@ -1690,4 +1808,13 @@ void GC_err_puts(/* char *s */);
/* newlines, don't ... */
# ifdef GC_ASSERTIONS
# define GC_ASSERT(expr) if(!(expr)) {\
GC_err_printf2("Assertion failure: %s:%ld\n", \
__FILE__, (unsigned long)__LINE__); \
ABORT("assertion failure"); }
# else
# define GC_ASSERT(expr)
# endif
# endif /* GC_PRIVATE_H */

View File

@ -43,6 +43,11 @@
# define OPENBSD
# define mach_type_known
# endif
# if defined(__OpenBSD__) && defined(__sparc__)
# define SPARC
# define OPENBSD
# define mach_type_known
# endif
# if defined(__NetBSD__) && defined(m68k)
# define M68K
# define NETBSD
@ -100,7 +105,8 @@
# endif
# define mach_type_known
# endif
# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux)
# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
&& !defined(__OpenBSD__)
# define SPARC
# define DRSNX
# define mach_type_known
@ -129,7 +135,7 @@
# define HP_PA
# define mach_type_known
# endif
# if defined(LINUX) && defined(i386)
# if defined(LINUX) && (defined(i386) || defined(__i386__))
# define I386
# define mach_type_known
# endif
@ -141,9 +147,8 @@
# define M68K
# define mach_type_known
# endif
# if defined(linux) && defined(sparc)
# if defined(LINUX) && defined(sparc)
# define SPARC
# define LINUX
# define mach_type_known
# endif
# if defined(__alpha) || defined(__alpha__)
@ -153,9 +158,11 @@
# endif
# define mach_type_known
# endif
# if defined(_AMIGA)
# define M68K
# if defined(_AMIGA) && !defined(AMIGA)
# define AMIGA
# endif
# ifdef AMIGA
# define M68K
# define mach_type_known
# endif
# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
@ -168,6 +175,11 @@
# define MACOS
# define mach_type_known
# endif
# if defined(macosx)
# define MACOSX
# define POWERPC
# define mach_type_known
# endif
# if defined(NeXT) && defined(mc68000)
# define M68K
# define NEXT
@ -486,8 +498,8 @@
# ifdef POWERPC
# define MACH_TYPE "POWERPC"
# define ALIGNMENT 2
# ifdef MACOS
# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
# ifndef __LOWMEM__
# include <LowMem.h>
# endif
@ -497,14 +509,24 @@
# define DATAEND /* not needed */
# endif
# ifdef LINUX
# define ALIGNMENT 4 /* Guess. Can someone verify? */
/* This was 2, but that didn't sound right. */
# define OS_TYPE "LINUX"
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
/* Stack usually starts at 0x80000000 */
# define DATASTART GC_data_start
extern int _end;
# define DATAEND (&_end)
# endif
# ifdef MACOSX
# define ALIGNMENT 4
# define OS_TYPE "MACOSX"
# define DATASTART ((ptr_t) get_etext())
# define STACKBOTTOM ((ptr_t) 0xc0000000)
# define DATAEND /* not needed */
# endif
# endif
# ifdef VAX
@ -603,6 +625,11 @@
# define SVR4
# define STACKBOTTOM ((ptr_t) 0xf0000000)
# endif
# ifdef OPENBSD
# define OS_TYPE "OPENBSD"
# define STACKBOTTOM ((ptr_t) 0xf8000000)
# define DATASTART ((ptr_t)(&etext))
# endif
# endif
# ifdef I386
@ -657,10 +684,13 @@
# endif
# ifdef LINUX
# define OS_TYPE "LINUX"
# define STACKBOTTOM ((ptr_t)0xc0000000)
/* Appears to be 0xe0000000 for at least one 2.1.91 kernel. */
/* Probably needs to be more flexible, but I don't yet */
/* fully understand how flexible. */
# define HEURISTIC1
# undef STACK_GRAN
# define STACK_GRAN 0x10000000
/* STACKBOTTOM is usually 0xc0000000, but this changes with */
/* different kernel configurations. In particular, systems */
/* with 2GB physical memory will usually move the user */
/* address space limit, and hence initial SP to 0x80000000. */
# if !defined(LINUX_THREADS) || !defined(REDIRECT_MALLOC)
# define MPROTECT_VDB
# else
@ -909,9 +939,13 @@
# define CPP_WORDSZ 64
# define STACKBOTTOM ((ptr_t) 0x120000000)
# ifdef __ELF__
# if 0
/* __data_start apparently disappeared in some recent releases. */
extern int __data_start;
# define DATASTART &__data_start
# define DYNAMIC_LOADING
# endif
# define DATASTART GC_data_start
# define DYNAMIC_LOADING
# else
# define DATASTART ((ptr_t) 0x140000000)
# endif
@ -1021,6 +1055,10 @@
# undef MPROTECT_VDB
# endif
# ifdef USE_MUNMAP
# undef MPROTECT_VDB /* Can't deal with address space holes. */
# endif
# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
# define DEFAULT_VDB
# endif