ggc-zone.c: Remove everything in #ifdef USING_MALLOC_PAGE_GROUPS

2004-01-05  Daniel Berlin  <dberlin@dberlin.org>

	* ggc-zone.c: Remove everything in #ifdef USING_MALLOC_PAGE_GROUPS
	(USING_MMAP): We don't support non-mmap.
	(struct alloc_chunk): Steal 1 bit from typecode, use it to mark
	large objects.
	(struct page_entry): Remove bytes_free.
	(struct page_table_chain): Remove.
	(struct globals): Remove page_table member.
	(lookup_page_table_entry): Function deleted.
	(set_page_table_entry): Ditto.
	(ggc_allocated_p): No longer need page table lookups.
	(ggc_marked_p): Ditto.
	(alloc_small_page): Don't care about bytes_free anymore.
	(alloc_large_page): Round up size.
	(ggc_alloc_zone_1): Mark large objects as such, and calculate
	their size the new way.
	Remove page table lookups and setting.
	(ggc_get_size): Calculate large object size the new way.
	(sweep_pages): Redo to account for the fact that we no longer have
	bytes_free.
	(ggc_collect): No longer need to reincrement bytes_free.
	(ggc_pch_alloc_object): Handle new large objects properly.
	(ggc_pch_read): Put PCH stuff into its own uncollected zone.

From-SVN: r75438
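
As a concrete illustration of the changes listed above, here is a minimal standalone sketch (not part of the commit) of the reworked chunk header and of how large-object sizes are now encoded at 1 KB granularity. The bitfield widths are taken from the struct alloc_chunk hunk below, and CEIL/ROUND_UP mirror the macros used in ggc-zone.c; the names alloc_chunk_sketch, encode_large_size and decode_size are invented here for illustration only and do not exist in the file.

#include <stddef.h>

#define CEIL(x, y)     (((x) + (y) - 1) / (y))
#define ROUND_UP(x, f) (CEIL (x, f) * (f))

/* Mirrors the new bitfield layout: one bit is taken from typecode
   (15 -> 14 bits) and used as the 'large' flag.  */
struct alloc_chunk_sketch
{
  unsigned int type:1;       /* In use (1) or free (0).  */
  unsigned int typecode:14;  /* Was 15 bits before this change.  */
  unsigned int large:1;      /* New: set for large (whole-page) objects.  */
  unsigned int size:15;      /* Bytes for small chunks, 1 KB units when large.  */
  unsigned int mark:1;
};

/* Store a large-object size the way ggc_alloc_zone_1 now does:
   round the request up to 1 KB and keep the size in KB units.  */
static void
encode_large_size (struct alloc_chunk_sketch *chunk, size_t request)
{
  size_t bytes = ROUND_UP (request, 1024);
  chunk->large = 1;
  chunk->size = CEIL (bytes, 1024);
}

/* Recover the byte size the way ggc_get_size now does.  */
static size_t
decode_size (const struct alloc_chunk_sketch *chunk)
{
  return chunk->large ? (size_t) chunk->size * 1024 : chunk->size;
}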
Authored and committed by Daniel Berlin, 2004-01-05 19:23:50 +00:00
commit 02fef8539e, parent ab22bc9148
2 changed files with 87 additions and 423 deletions

gcc/ChangeLog

@@ -1,3 +1,28 @@
2004-01-05 Daniel Berlin <dberlin@dberlin.org>
* ggc-zone.c: Remove everything in #ifdef USING_MALLOC_PAGE_GROUPS
(USING_MMAP): We don't support non-mmap.
(struct alloc_chunk): Steal 1 bit from typecode, use it to mark
large objects.
(struct page_entry): Remove bytes_free.
(struct page_table_chain): Remove.
(struct globals): Remove page_table member.
(lookup_page_table_entry): Function deleted.
(set_page_table_entry): Ditto.
(ggc_allocated_p): No longer need page table lookups.
(ggc_marked_p): Ditto.
(alloc_small_page): Don't care about bytes_free anymore.
(alloc_large_page): Round up size.
(ggc_alloc_zone_1): Mark large objects as such, and calculate
their size the new way.
Remove page table lookups and setting.
(ggc_get_size): Calculate large object size the new way.
(sweep_pages): Redo to account for the fact that we no longer have
bytes_free.
(ggc_collect): No longer need to reincrement bytes_free.
(ggc_pch_alloc_object): Handle new large objects properly.
(ggc_pch_read): Put PCH stuff into its own uncollected zone.
2004-01-05 Kazu Hirata <kazu@cs.umass.edu>
* doc/invoke.texi: Remove a page break.

gcc/ggc-zone.c

@@ -1,6 +1,7 @@
/* "Bag-of-pages" zone garbage collector for the GNU compiler.
Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin (dberlin@dberlin.org)
Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
(dberlin@dberlin.org)
This file is part of GCC.
@@ -76,7 +77,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#endif
#ifndef USING_MMAP
#define USING_MALLOC_PAGE_GROUPS
#error "Zone collector requires mmap"
#endif
#if (GCC_VERSION < 3001)
@@ -143,7 +144,8 @@ struct alloc_chunk {
unsigned int magic;
#endif
unsigned int type:1;
unsigned int typecode:15;
unsigned int typecode:14;
unsigned int large:1;
unsigned int size:15;
unsigned int mark:1;
union {
@@ -211,38 +213,6 @@ struct max_alignment {
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
/* A two-level tree is used to look up the page-entry for a given
pointer. Two chunks of the pointer's bits are extracted to index
the first and second levels of the tree, as follows:
                                   HOST_PAGE_SIZE_BITS
                           32           |      |
       msb +----------------+----+------+------+ lsb
                            |    |      |
                         PAGE_L1_BITS   |
                                 |      |
                               PAGE_L2_BITS
The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
pages are aligned on system page boundaries. The next most
significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
index values in the lookup table, respectively.
For 32-bit architectures and the settings below, there are no
leftover bits. For architectures with wider pointers, the lookup
tree points to a list of pages, which must be scanned to find the
correct one. */
#define PAGE_L1_BITS (8)
#define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
#define LOOKUP_L1(p) \
(((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
#define LOOKUP_L2(p) \
(((size_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
/* A page_entry records the status of an allocation page. */
typedef struct page_entry
@@ -261,15 +231,6 @@ typedef struct page_entry
/* The address at which the memory is allocated. */
char *page;
#ifdef USING_MALLOC_PAGE_GROUPS
/* Back pointer to the page group this page came from. */
struct page_group *group;
#endif
/* Number of bytes on the page unallocated. Only used during
collection, and even then large pages merely set this nonzero. */
size_t bytes_free;
/* Context depth of this page. */
unsigned short context_depth;
@@ -280,50 +241,10 @@ typedef struct page_entry
struct alloc_zone *zone;
} page_entry;
#ifdef USING_MALLOC_PAGE_GROUPS
/* A page_group describes a large allocation from malloc, from which
we parcel out aligned pages. */
typedef struct page_group
{
/* A linked list of all extant page groups. */
struct page_group *next;
/* The address we received from malloc. */
char *allocation;
/* The size of the block. */
size_t alloc_size;
/* A bitmask of pages in use. */
unsigned int in_use;
} page_group;
#endif
#if HOST_BITS_PER_PTR <= 32
/* On 32-bit hosts, we use a two level page table, as pictured above. */
typedef page_entry **page_table[PAGE_L1_SIZE];
#else
/* On 64-bit hosts, we use the same two level page tables plus a linked
list that disambiguates the top 32-bits. There will almost always be
exactly one entry in the list. */
typedef struct page_table_chain
{
struct page_table_chain *next;
size_t high_bits;
page_entry **table[PAGE_L1_SIZE];
} *page_table;
#endif
/* The global variables. */
static struct globals
{
/* The page lookup table. A single page can only belong to one
zone. This means free pages are zone-specific ATM. */
page_table lookup;
/* The linked list of zones. */
struct alloc_zone *zones;
@@ -374,10 +295,6 @@ struct alloc_zone
/* A cache of free system pages. */
page_entry *free_pages;
#ifdef USING_MALLOC_PAGE_GROUPS
page_group *page_groups;
#endif
/* Next zone in the linked list of zones. */
struct alloc_zone *next_zone;
@@ -399,16 +316,9 @@ struct alloc_zone *tree_zone;
#define GGC_QUIRE_SIZE 16
static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
static void set_page_group_in_use (page_group *, char *);
static void clear_page_group_in_use (page_group *, char *);
#endif
static struct page_entry * alloc_small_page ( struct alloc_zone *);
static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
@@ -425,94 +335,17 @@ static void check_cookies (void);
static inline int
ggc_allocated_p (const void *p)
{
page_entry ***base;
size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
base = &G.lookup[0];
#else
page_table table = G.lookup;
size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
while (1)
{
if (table == NULL)
return 0;
if (table->high_bits == high_bits)
break;
table = table->next;
}
base = &table->table[0];
struct alloc_chunk *chunk;
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
if (chunk->magic != CHUNK_MAGIC)
abort ();
#endif
/* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
return base[L1] && base[L1][L2];
if (chunk->type == 1)
return true;
return false;
}
/* Traverse the page table and find the entry for a page.
Die (probably) if the object wasn't allocated via GC. */
static inline page_entry *
lookup_page_table_entry(const void *p)
{
page_entry ***base;
size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
base = &G.lookup[0];
#else
page_table table = G.lookup;
size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
while (table->high_bits != high_bits)
table = table->next;
base = &table->table[0];
#endif
/* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
return base[L1][L2];
}
/* Set the page table entry for a page. */
static void
set_page_table_entry(void *p, page_entry *entry)
{
page_entry ***base;
size_t L1, L2;
#if HOST_BITS_PER_PTR <= 32
base = &G.lookup[0];
#else
page_table table;
size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
for (table = G.lookup; table; table = table->next)
if (table->high_bits == high_bits)
goto found;
/* Not found -- allocate a new table. */
table = (page_table) xcalloc (1, sizeof(*table));
table->next = G.lookup;
table->high_bits = high_bits;
G.lookup = table;
found:
base = &table->table[0];
#endif
/* Extract the level 1 and 2 indices. */
L1 = LOOKUP_L1 (p);
L2 = LOOKUP_L2 (p);
if (base[L1] == NULL)
base[L1] = (page_entry **) xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
base[L1][L2] = entry;
}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
@@ -547,42 +380,15 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
return page;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
/* Compute the index for this page into the page group. */
static inline size_t
page_group_index (char *allocation, char *page)
{
return (size_t) (page - allocation) >> G.lg_pagesize;
}
/* Set and clear the in_use bit for this page in the page group. */
static inline void
set_page_group_in_use (page_group *group, char *page)
{
group->in_use |= 1 << page_group_index (group->allocation, page);
}
static inline void
clear_page_group_in_use (page_group *group, char *page)
{
group->in_use &= ~(1 << page_group_index (group->allocation, page));
}
#endif
/* Allocate a new page for allocating objects of size 2^ORDER,
and return an entry for it. The entry is not added to the
appropriate page_table list. */
and return an entry for it. */
static inline struct page_entry *
alloc_small_page (struct alloc_zone *zone)
{
struct page_entry *entry;
char *page;
#ifdef USING_MALLOC_PAGE_GROUPS
page_group *group;
#endif
page = NULL;
@@ -594,9 +400,7 @@ alloc_small_page (struct alloc_zone *zone)
zone->free_pages = entry->next;
page = entry->page;
#ifdef USING_MALLOC_PAGE_GROUPS
group = entry->group;
#endif
}
#ifdef USING_MMAP
else
@@ -623,97 +427,17 @@ alloc_small_page (struct alloc_zone *zone)
zone->free_pages = f;
}
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
else
{
/* Allocate a large block of memory and serve out the aligned
pages therein. This results in much less memory wastage
than the traditional implementation of valloc. */
char *allocation, *a, *enda;
size_t alloc_size, head_slop, tail_slop;
int multiple_pages = (entry_size == G.pagesize);
if (multiple_pages)
alloc_size = GGC_QUIRE_SIZE * G.pagesize;
else
alloc_size = entry_size + G.pagesize - 1;
allocation = xmalloc (alloc_size);
VALGRIND_MALLOCLIKE_BLOCK(addr, alloc_size, 0, 0);
page = (char *) (((size_t) allocation + G.pagesize - 1) & -G.pagesize);
head_slop = page - allocation;
if (multiple_pages)
tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
else
tail_slop = alloc_size - entry_size - head_slop;
enda = allocation + alloc_size - tail_slop;
/* We allocated N pages, which are likely not aligned, leaving
us with N-1 usable pages. We plan to place the page_group
structure somewhere in the slop. */
if (head_slop >= sizeof (page_group))
group = (page_group *)page - 1;
else
{
/* We magically got an aligned allocation. Too bad, we have
to waste a page anyway. */
if (tail_slop == 0)
{
enda -= G.pagesize;
tail_slop += G.pagesize;
}
if (tail_slop < sizeof (page_group))
abort ();
group = (page_group *)enda;
tail_slop -= sizeof (page_group);
}
/* Remember that we allocated this memory. */
group->next = G.page_groups;
group->allocation = allocation;
group->alloc_size = alloc_size;
group->in_use = 0;
zone->page_groups = group;
G.bytes_mapped += alloc_size;
/* If we allocated multiple pages, put the rest on the free list. */
if (multiple_pages)
{
struct page_entry *e, *f = G.free_pages;
for (a = enda - G.pagesize; a != page; a -= G.pagesize)
{
e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
e->bytes = G.pagesize;
e->page = a;
e->group = group;
e->next = f;
f = e;
}
zone->free_pages = f;
}
}
#endif
if (entry == NULL)
entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
entry->next = 0;
entry->bytes = G.pagesize;
entry->bytes_free = G.pagesize;
entry->page = page;
entry->context_depth = zone->context_depth;
entry->large_p = false;
entry->zone = zone;
zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
#ifdef USING_MALLOC_PAGE_GROUPS
entry->group = group;
set_page_group_in_use (group, page);
#endif
set_page_table_entry (page, entry);
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Allocating %s page at %p, data %p-%p\n", entry->zone->name,
@@ -721,6 +445,9 @@ alloc_small_page (struct alloc_zone *zone)
return entry;
}
/* Compute the smallest multiple of F that is >= X. */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
/* Allocate a large page of size SIZE in ZONE. */
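
A quick worked example of the macro above: ROUND_UP (5000, 1024) expands to CEIL (5000, 1024) * 1024 = 5 * 1024 = 5120, so a 5000-byte large request is padded to the next 1 KB boundary before alloc_large_page and ggc_alloc_zone_1 record its size.
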
@@ -729,24 +456,18 @@ alloc_large_page (size_t size, struct alloc_zone *zone)
{
struct page_entry *entry;
char *page;
size = ROUND_UP (size, 1024);
page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
entry->next = 0;
entry->bytes = size;
entry->bytes_free = LARGE_OBJECT_SIZE + CHUNK_OVERHEAD;
entry->page = page;
entry->context_depth = zone->context_depth;
entry->large_p = true;
entry->zone = zone;
zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
#ifdef USING_MALLOC_PAGE_GROUPS
entry->group = NULL;
#endif
set_page_table_entry (page, entry);
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
"Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
@@ -766,8 +487,6 @@ free_page (page_entry *entry)
"Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
entry->page, entry->page + entry->bytes - 1);
set_page_table_entry (entry->page, NULL);
if (entry->large_p)
{
free (entry->page);
@@ -779,10 +498,6 @@ free_page (page_entry *entry)
avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
#ifdef USING_MALLOC_PAGE_GROUPS
clear_page_group_in_use (entry->group, entry->page);
#endif
entry->next = entry->zone->free_pages;
entry->zone->free_pages = entry;
}
@@ -823,34 +538,6 @@ release_pages (struct alloc_zone *zone)
zone->free_pages = NULL;
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
page_entry **pp, *p;
page_group **gp, *g;
/* Remove all pages from free page groups from the list. */
pp = &(zone->free_pages);
while ((p = *pp) != NULL)
if (p->group->in_use == 0)
{
*pp = p->next;
free (p);
}
else
pp = &p->next;
/* Remove all free page groups, and release the storage. */
gp = &(zone->page_groups);
while ((g = *gp) != NULL)
if (g->in_use == 0)
{
*gp = g->next;
zone->bytes_mapped -= g->alloc_size;
free (g->allocation);
VALGRIND_FREELIKE_BLOCK(g->allocation, 0);
}
else
gp = &g->next;
#endif
}
/* Place CHUNK of size SIZE on the free list for ZONE. */
@@ -896,15 +583,16 @@ ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
/* Large objects are handled specially. */
if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
{
size = ROUND_UP (size, 1024);
entry = alloc_large_page (size, zone);
entry->survived = 0;
entry->next = entry->zone->pages;
entry->zone->pages = entry;
chunk = (struct alloc_chunk *) entry->page;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->size = LARGE_OBJECT_SIZE;
chunk->large = 1;
chunk->size = CEIL (size, 1024);
goto found;
}
@@ -943,11 +631,13 @@ ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
chunk = (struct alloc_chunk *) entry->page;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->size = G.pagesize - CHUNK_OVERHEAD;
chunk->large = 0;
}
else
{
*pp = chunk->u.next_free;
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
chunk->large = 0;
}
/* Release extra memory from a chunk that's too big. */
lsize = chunk->size - size;
@@ -965,6 +655,7 @@ ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
lchunk->type = 0;
lchunk->mark = 0;
lchunk->size = lsize;
lchunk->large = 0;
free_chunk (lchunk, lsize, zone);
}
/* Calculate the object's address. */
@@ -1049,16 +740,8 @@ ggc_alloc_zone (size_t size, struct alloc_zone *zone)
int
ggc_set_mark (const void *p)
{
page_entry *entry;
struct alloc_chunk *chunk;
#ifdef ENABLE_CHECKING
/* Look up the page on which the object is alloced. If the object
wasn't allocated by the collector, we'll probably die. */
entry = lookup_page_table_entry (p);
if (entry == NULL)
abort ();
#endif
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
if (chunk->magic != CHUNK_MAGIC)
@@ -1068,17 +751,6 @@ ggc_set_mark (const void *p)
return 1;
chunk->mark = 1;
#ifndef ENABLE_CHECKING
entry = lookup_page_table_entry (p);
#endif
/* Large pages are either completely full or completely empty. So if
they are marked, they are completely full. */
if (entry->large_p)
entry->bytes_free = 0;
else
entry->bytes_free -= chunk->size + CHUNK_OVERHEAD;
if (GGC_DEBUG_LEVEL >= 4)
fprintf (G.debug_file, "Marking %p\n", p);
@@ -1094,14 +766,6 @@ ggc_marked_p (const void *p)
{
struct alloc_chunk *chunk;
#ifdef ENABLE_CHECKING
{
page_entry *entry = lookup_page_table_entry (p);
if (entry == NULL)
abort ();
}
#endif
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
if (chunk->magic != CHUNK_MAGIC)
@@ -1116,26 +780,14 @@ size_t
ggc_get_size (const void *p)
{
struct alloc_chunk *chunk;
struct page_entry *entry;
#ifdef ENABLE_CHECKING
entry = lookup_page_table_entry (p);
if (entry == NULL)
abort ();
#endif
chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
#ifdef COOKIE_CHECKING
if (chunk->magic != CHUNK_MAGIC)
abort ();
#endif
if (chunk->size == LARGE_OBJECT_SIZE)
{
#ifndef ENABLE_CHECKING
entry = lookup_page_table_entry (p);
#endif
return entry->bytes;
}
if (chunk->large)
return chunk->size * 1024;
return chunk->size;
}
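
A note on capacity, following from the encoding shown here and the 15-bit size field in the chunk header: the largest representable large object is (2^15 - 1) * 1024 bytes, just under 32 MB. Storing large sizes in 1 KB units is what lets a whole-page object's size fit in the existing 15-bit field now that the page-table entry no longer carries it.
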
@@ -1278,8 +930,6 @@ ggc_pop_context (void)
for (zone = G.zones; zone; zone = zone->next_zone)
ggc_pop_context_1 (zone);
}
/* Poison the chunk. */
#ifdef ENABLE_GC_CHECKING
#define poison_chunk(CHUNK, SIZE) \
@@ -1296,7 +946,7 @@ sweep_pages (struct alloc_zone *zone)
page_entry **pp, *p, *next;
struct alloc_chunk *chunk, *last_free, *end;
size_t last_free_size, allocated = 0;
bool nomarksinpage;
/* First, reset the free_chunks lists, since we are going to
re-free free chunks in hopes of coalescing them into large chunks. */
memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
@@ -1304,33 +954,28 @@ sweep_pages (struct alloc_zone *zone)
for (p = zone->pages; p ; p = next)
{
next = p->next;
/* For empty pages, just free the page. */
if (p->bytes_free == G.pagesize && p->context_depth == zone->context_depth)
/* Large pages are all or none affairs. Either they are
completely empty, or they are completely full.
XXX: Should we bother to increment allocated. */
if (p->large_p)
{
*pp = next;
if (((struct alloc_chunk *)p->page)->mark == 1)
{
((struct alloc_chunk *)p->page)->mark = 0;
}
else
{
*pp = next;
#ifdef ENABLE_GC_CHECKING
/* Poison the page. */
memset (p->page, 0xb5, p->bytes);
#endif
free_page (p);
free_page (p);
}
continue;
}
/* Large pages are all or none affairs. Either they are
completely empty, or they are completely full.
Thus, if the above didn't catch it, we need not do anything
except remove the mark and reset the bytes_free.
XXX: Should we bother to increment allocated. */
else if (p->large_p)
{
p->bytes_free = p->bytes;
((struct alloc_chunk *)p->page)->mark = 0;
continue;
}
pp = &p->next;
/* This page has now survived another collection. */
p->survived++;
@@ -1343,12 +988,13 @@ sweep_pages (struct alloc_zone *zone)
end = (struct alloc_chunk *)(p->page + G.pagesize);
last_free = NULL;
last_free_size = 0;
nomarksinpage = true;
do
{
prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
if (chunk->mark || p->context_depth < zone->context_depth)
{
nomarksinpage = false;
if (last_free)
{
last_free->type = 0;
@@ -1361,13 +1007,8 @@ sweep_pages (struct alloc_zone *zone)
if (chunk->mark)
{
allocated += chunk->size + CHUNK_OVERHEAD;
p->bytes_free += chunk->size + CHUNK_OVERHEAD;
}
chunk->mark = 0;
#ifdef ENABLE_CHECKING
if (p->bytes_free > p->bytes)
abort ();
#endif
}
else
{
@@ -1386,7 +1027,17 @@ sweep_pages (struct alloc_zone *zone)
}
while (chunk < end);
if (last_free)
if (nomarksinpage)
{
*pp = next;
#ifdef ENABLE_GC_CHECKING
/* Poison the page. */
memset (p->page, 0xb5, p->bytes);
#endif
free_page (p);
continue;
}
else if (last_free)
{
last_free->type = 0;
last_free->size = last_free_size;
@ -1394,6 +1045,7 @@ sweep_pages (struct alloc_zone *zone)
poison_chunk (last_free, last_free_size);
free_chunk (last_free, last_free_size, zone);
}
pp = &p->next;
}
zone->allocated = allocated;
@@ -1496,8 +1148,6 @@ check_cookies (void)
}
#endif
}
/* Top level collection routine. */
void
@@ -1569,12 +1219,6 @@ ggc_collect (void)
prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
if (chunk->mark || p->context_depth < zone->context_depth)
{
if (chunk->mark)
p->bytes_free += chunk->size + CHUNK_OVERHEAD;
#ifdef ENABLE_CHECKING
if (p->bytes_free > p->bytes)
abort ();
#endif
chunk->mark = 0;
}
chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
@@ -1583,7 +1227,6 @@ ggc_collect (void)
}
else
{
p->bytes_free = p->bytes;
((struct alloc_chunk *)p->page)->mark = 0;
}
}
@@ -1679,7 +1322,7 @@ ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
if (!is_string)
{
struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
if (chunk->size == LARGE_OBJECT_SIZE)
if (chunk->large)
d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
else
d->base += chunk->size + CHUNK_OVERHEAD;
@@ -1735,24 +1378,20 @@ ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
fatal_error ("can't write PCH file: %m");
free (d);
}
void
ggc_pch_read (FILE *f, void *addr)
{
struct ggc_pch_ondisk d;
struct page_entry *entry;
char *pte;
struct alloc_zone *pch_zone;
if (fread (&d, sizeof (d), 1, f) != 1)
fatal_error ("can't read PCH file: %m");
entry = xcalloc (1, sizeof (struct page_entry));
entry->bytes = d.total;
entry->page = addr;
entry->context_depth = 0;
entry->zone = &main_zone;
for (pte = entry->page;
pte < entry->page + entry->bytes;
pte += G.pagesize)
set_page_table_entry (pte, entry);
pch_zone = new_ggc_zone ("PCH zone");
entry->zone = pch_zone;
entry->next = entry->zone->pages;
entry->zone->pages = entry;
}