ggc-common.c [!ENABLE_VALGRIND_CHECKING] (VALGRIND_DISCARD): Define as empty.

* ggc-common.c [!ENABLE_VALGRIND_CHECKING] (VALGRIND_DISCARD):
	Define as empty.
	(ggc_realloc): Update valgrind annotations.
	* ggc-page.c [!ENABLE_VALGRIND_CHECKING] (VALGRIND_DISCARD):
	Define as empty.
	(alloc_anon, free_page, ggc_alloc, poison_pages): Add machinery to
	valgrind-annotate memory.

From-SVN: r59310
Author:    Hans-Peter Nilsson
Committed: 2002-11-20 20:06:34 +00:00 by Hans-Peter Nilsson
Commit:    9a0a7d5dfa (parent 414d23ae18)
3 changed files with 85 additions and 2 deletions

gcc/ChangeLog

@@ -18,6 +18,14 @@
* gcc.c (execute) [ENABLE_VALGRIND_CHECKING]: Arrange to prepend
VALGRIND_PATH -q to each command.
* ggc-common.c [!ENABLE_VALGRIND_CHECKING] (VALGRIND_DISCARD):
Define as empty.
(ggc_realloc): Update valgrind annotations.
* ggc-page.c [!ENABLE_VALGRIND_CHECKING] (VALGRIND_DISCARD):
Define as empty.
(alloc_anon, free_page, ggc_alloc, poison_pages): Add machinery to
valgrind-annotate memory.
2002-11-20 Ulrich Weigand <uweigand@de.ibm.com>
* recog.c (constrain_operands): Prefer exact match over reloadable
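
The two "Define as empty" items above rely on one small trick: every annotation in the patch is written as VALGRIND_DISCARD (VALGRIND_MAKE_... (...)), so defining VALGRIND_DISCARD away is enough to make the whole expression, argument included, disappear from builds configured without ENABLE_VALGRIND_CHECKING. A minimal standalone sketch of the idiom follows; retire_region is only an illustrative helper, and the request names are the ones valgrind.h used at the time (newer releases spell them VALGRIND_MAKE_MEM_NOACCESS, _UNDEFINED and _DEFINED in <valgrind/memcheck.h>).

#include <stddef.h>

#ifdef ENABLE_VALGRIND_CHECKING
#include <valgrind.h>
#else
/* With checking disabled, the whole VALGRIND_DISCARD (...) expression,
   argument included, compiles away, so no other fallback is needed.  */
#define VALGRIND_DISCARD(x)
#endif

/* Illustrative helper, not part of the patch: mark LEN bytes at PTR
   unaddressable.  With checking enabled, the MAKE_* request returns a
   block-description handle; discarding it immediately avoids the
   "handle leak" the comments in this commit refer to.  */
static void
retire_region (void *ptr, size_t len)
{
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr, len));
}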

gcc/ggc-common.c

@@ -30,6 +30,12 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "varray.h"
#include "ggc.h"
#include "langhooks.h"
#ifdef ENABLE_VALGRIND_CHECKING
#include <valgrind.h>
#else
/* Avoid #ifdef:s when we can help it. */
#define VALGRIND_DISCARD(x)
#endif
/* Statistics about the allocation. */
static ggc_statistics *ggc_stats;
@@ -155,10 +161,36 @@ ggc_realloc (x, size)
old_size = ggc_get_size (x);
if (size <= old_size)
return x;
{
/* Mark the unwanted memory as unaccessible. We also need to make
the "new" size accessible, since ggc_get_size returns the size of
the pool, not the size of the individually allocated object, the
size which was previously made accessible. Unfortunately, we
don't know that previously allocated size. Without that
knowledge we have to lose some initialization-tracking for the
old parts of the object. An alternative is to mark the whole
old_size as reachable, but that would lose tracking of writes
after the end of the object (by small offsets). Discard the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
old_size - size));
VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
return x;
}
r = ggc_alloc (size);
/* Since ggc_get_size returns the size of the pool, not the size of the
individually allocated object, we'd access parts of the old object
that were marked invalid with the memcpy below. We lose a bit of the
initialization-tracking since some of it may be uninitialized. */
VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));
memcpy (r, x, old_size);
/* The old object is not supposed to be used anymore. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));
return r;
}
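
For readability, here is the same ggc_realloc logic re-indented as a standalone sketch, assuming the VALGRIND_DISCARD fallback shown earlier; my_pool_size and my_pool_alloc are hypothetical stand-ins for ggc_get_size and ggc_alloc, not GCC functions.

#include <stddef.h>
#include <string.h>

extern size_t my_pool_size (void *);     /* stand-in for ggc_get_size */
extern void *my_pool_alloc (size_t);     /* stand-in for ggc_alloc */

void *
pool_realloc (void *x, size_t size)
{
  size_t old_size = my_pool_size (x);
  void *r;

  if (size <= old_size)
    {
      /* Shrink in place: forbid access past the new end, then mark the
         kept bytes addressable and defined, since the pool size
         overstates what the caller was ever handed.  */
      VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) x + size,
                                                old_size - size));
      VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, size));
      return x;
    }

  r = my_pool_alloc (size);

  /* Let memcpy read the whole old pool object, copy it, then retire
     the old object for good.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (x, old_size));
  memcpy (r, x, old_size);
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (x, old_size));
  return r;
}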

gcc/ggc-page.c

@@ -29,6 +29,12 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "ggc.h"
#include "timevar.h"
#include "params.h"
#ifdef ENABLE_VALGRIND_CHECKING
#include <valgrind.h>
#else
/* Avoid #ifdef:s when we can help it. */
#define VALGRIND_DISCARD(x)
#endif
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
@@ -524,6 +530,11 @@ alloc_anon (pref, size)
/* Remember that we allocated this memory. */
G.bytes_mapped += size;
/* Pretend we don't have access to the allocated pages. We'll enable
access to smaller pieces of the area in ggc_alloc. Discard the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
return page;
}
#endif
@@ -750,6 +761,10 @@ free_page (entry)
"Deallocating page at %p, data %p-%p\n", (PTR) entry,
entry->page, entry->page + entry->bytes - 1);
/* Mark the page as inaccessible. Discard the handle to avoid handle
leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
set_page_table_entry (entry->page, NULL);
#ifdef USING_MALLOC_PAGE_GROUPS
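
Taken together, the alloc_anon and free_page hunks bracket each page's lifetime: a freshly mapped page is unaddressable until ggc_alloc exposes pieces of it, and it becomes unaddressable again when it is released. A hedged sketch of that bracket, with map_anon_pages and unmap_pages as hypothetical stand-ins for the mmap/munmap (or malloc/free) paths in ggc-page.c:

#include <stddef.h>

extern char *map_anon_pages (size_t);    /* stand-in for the mmap path */
extern void unmap_pages (char *, size_t);/* stand-in for munmap/free */

char *
grab_page (size_t size)
{
  char *page = map_anon_pages (size);

  /* Nothing on a freshly mapped page may be touched until ggc_alloc
     opens an object-sized window into it.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  return page;
}

void
release_page (char *page, size_t size)
{
  /* Any later access through a stale pointer becomes an error.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
  unmap_pages (page, size);
}
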
@@ -943,11 +958,27 @@ ggc_alloc (size)
result = entry->page + object_offset;
#ifdef ENABLE_GC_CHECKING
/* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
exact same semantics in presence of memory bugs, regardless of
ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, OBJECT_SIZE (order)));
/* `Poison' the entire allocated object, including any padding at
the end. */
memset (result, 0xaf, OBJECT_SIZE (order));
/* Make the bytes after the end of the object unaccessible. Discard the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS ((char *) result + size,
OBJECT_SIZE (order) - size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
unaccessible. */
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
G.allocated += OBJECT_SIZE (order);
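
The allocation path layers two things: under ENABLE_GC_CHECKING the object is still poisoned with 0xaf exactly as before, which requires briefly making the whole slot writable, and afterwards only the size the caller asked for is exposed as addressable-but-undefined, leaving the padding up to OBJECT_SIZE (order) unaddressable. A condensed sketch of that sequence, under the same assumptions as the earlier sketches (expose_object is an illustrative name only):

#include <stddef.h>
#include <string.h>

/* SLOT is an object slot carved out of a page that is still marked
   NOACCESS from grab_page above; SLOT_SIZE plays the role of
   OBJECT_SIZE (order) and REQUESTED is the caller's size.  */
static void *
expose_object (char *slot, size_t slot_size, size_t requested)
{
#ifdef ENABLE_GC_CHECKING
  /* Keep the historical 0xaf poisoning regardless of valgrind, so the
     program behaves the same in the presence of memory bugs.  The slot
     must be writable for the memset; the requests below narrow the
     window again.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (slot, slot_size));
  memset (slot, 0xaf, slot_size);
  /* Padding past the requested size goes back off limits.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (slot + requested,
                                            slot_size - requested));
#endif
  /* The caller's bytes now exist but are undefined until written.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (slot, requested));
  return slot;
}
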
@@ -1433,7 +1464,19 @@ poison_pages ()
word = i / HOST_BITS_PER_LONG;
bit = i % HOST_BITS_PER_LONG;
if (((p->in_use_p[word] >> bit) & 1) == 0)
memset (p->page + i * size, 0xa5, size);
{
char *object = p->page + i * size;
/* Keep poison-by-write when we expect to use Valgrind,
so the exact same memory semantics is kept, in case
there are memory errors. We override this request
below. */
VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
memset (object, 0xa5, size);
/* Drop the handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
}
}
}
}
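
poison_pages follows the same discipline during collection: each dead object is still poisoned with 0xa5, but the memset is now bracketed by making the object writable and then unaddressable again, so the poisoning neither trips memcheck itself nor re-legitimizes later use of the dead object. A minimal sketch of that bracket, assuming the earlier VALGRIND_DISCARD fallback (poison_dead_object is an illustrative name only):

#include <stddef.h>
#include <string.h>

/* Poison one dead object during collection.  */
static void
poison_dead_object (char *object, size_t size)
{
  /* Open a window just long enough to write the poison pattern...  */
  VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (object, size));
  memset (object, 0xa5, size);
  /* ...then close it again: the object is dead and must not be used.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (object, size));
}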