Add a fragmentation fallback in ggc-page v2

There were some concerns that the earlier munmap patch could lead
to address space being freed that cannot be allocated again
by ggc due to fragmentation. This patch adds a fragmentation
fallback to solve this: when a GGC_QUIRE_SIZE sized allocation fails,
try again with a page sized allocation.

Passes bootstrap and testing on x86_64-linux with the fallback
forced artificially.

v2: fix missed initialization bug added in last minute edit.

gcc/:
2011-10-20  Andi Kleen  <ak@linux.intel.com>

	* ggc-page.c (alloc_anon): Add check argument.
	(alloc_page): Add fallback to 1 page allocation.
	Adjust alloc_anon calls to new argument.

From-SVN: r180649
This commit is contained in:
Andi Kleen 2011-10-29 01:02:14 +00:00 committed by Andi Kleen
parent d33ef9a52b
commit 25f0ea8135
2 changed files with 21 additions and 8 deletions

View File

@ -1,3 +1,9 @@
2011-10-20 Andi Kleen <ak@linux.intel.com>
* ggc-page.c (alloc_anon): Add check argument.
(alloc_page): Add fallback to 1 page allocation.
Adjust alloc_anon calls to new argument.
2011-10-18 Andi Kleen <ak@linux.intel.com>
* ggc-page.c (release_pages): First free large contiguous

View File

@ -483,7 +483,7 @@ static int ggc_allocated_p (const void *);
static page_entry *lookup_page_table_entry (const void *);
static void set_page_table_entry (void *, page_entry *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t);
static char *alloc_anon (char *, size_t, bool check);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
static size_t page_group_index (char *, char *);
@ -662,7 +662,7 @@ debug_print_page_list (int order)
compile error unless exactly one of the HAVE_* is defined. */
static inline char *
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
{
#ifdef HAVE_MMAP_ANON
char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
@ -675,6 +675,8 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
if (page == (char *) MAP_FAILED)
{
if (!check)
return NULL;
perror ("virtual memory exhausted");
exit (FATAL_EXIT_CODE);
}
@ -777,13 +779,18 @@ alloc_page (unsigned order)
extras on the freelist. (Can only do this optimization with
mmap for backing store.) */
struct page_entry *e, *f = G.free_pages;
int i;
int i, entries = GGC_QUIRE_SIZE;
page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE);
page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
if (page == NULL)
{
page = alloc_anon(NULL, G.pagesize, true);
entries = 1;
}
/* This loop counts down so that the chain will be in ascending
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
for (i = entries - 1; i >= 1; i--)
{
e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
@ -796,7 +803,7 @@ alloc_page (unsigned order)
G.free_pages = f;
}
else
page = alloc_anon (NULL, entry_size);
page = alloc_anon (NULL, entry_size, true);
#endif
#ifdef USING_MALLOC_PAGE_GROUPS
else
@ -1649,14 +1656,14 @@ init_ggc (void)
believe, is an unaligned page allocation, which would cause us to
hork badly if we tried to use it. */
{
char *p = alloc_anon (NULL, G.pagesize);
char *p = alloc_anon (NULL, G.pagesize, true);
struct page_entry *e;
if ((size_t)p & (G.pagesize - 1))
{
/* How losing. Discard this one and try another. If we still
can't get something useful, give up. */
p = alloc_anon (NULL, G.pagesize);
p = alloc_anon (NULL, G.pagesize, true);
gcc_assert (!((size_t)p & (G.pagesize - 1)));
}