oslib: rework anonymous RAM allocation

At the moment we first allocate RAM, sometimes more than necessary for
alignment reasons.  We then free the extra RAM.

Rework this to avoid the temporary allocation: reserve the range by
mapping it PROT_NONE, then map just the necessary range read/write
with MAP_FIXED.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Michael S. Tsirkin
Date:   2015-09-10 16:36:51 +03:00
Parent: 0cf33fb6b4
Commit: c2dfc5ba3f


@@ -129,9 +129,9 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
 {
     size_t align = QEMU_VMALLOC_ALIGN;
     size_t total = size + align - getpagesize();
-    void *ptr = mmap(0, total, PROT_READ | PROT_WRITE,
-                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
+    void *ptr1;
 
     if (ptr == MAP_FAILED) {
         return NULL;
@@ -140,6 +140,14 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
     if (alignment) {
         *alignment = align;
     }
 
+    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
+                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    if (ptr1 == MAP_FAILED) {
+        munmap(ptr, total);
+        return NULL;
+    }
+
     ptr += offset;
     total -= offset;
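
For context, the reserve-then-commit idiom the diff switches to can be shown as a self-contained sketch. This is an illustration only, not the QEMU implementation: the helper name reserve_aligned_ram is made up, and it assumes align is a non-zero multiple of the page size.

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Sketch of the reserve-then-commit pattern: reserve an oversized
 * PROT_NONE region (address space only), then map the aligned subrange
 * read/write with MAP_FIXED.  Hypothetical helper, not QEMU code;
 * assumes align is a non-zero multiple of the page size. */
static void *reserve_aligned_ram(size_t size, size_t align)
{
    size_t total = size + align - getpagesize();

    /* Reserve address space without access permissions. */
    void *ptr = mmap(NULL, total, PROT_NONE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED) {
        return NULL;
    }

    /* Distance from ptr to the next align boundary. */
    size_t offset = ((uintptr_t)ptr + align - 1) / align * align
                    - (uintptr_t)ptr;

    /* MAP_FIXED atomically replaces the PROT_NONE pages in
     * [ptr + offset, ptr + offset + size) with a usable mapping. */
    void *ptr1 = mmap((char *)ptr + offset, size, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr1 == MAP_FAILED) {
        munmap(ptr, total);
        return NULL;
    }

    /* The leading padding is never needed again; drop it.  Trailing
     * PROT_NONE pages, if any, could be trimmed the same way. */
    if (offset > 0) {
        munmap(ptr, offset);
    }
    return ptr1;
}

The point of the PROT_NONE reservation is that the alignment padding never becomes accessible memory at all; only the aligned range the caller actually uses is ever mapped read/write.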