util/mmap-alloc: Pass flags instead of separate bools to qemu_ram_mmap()
Let's pass flags instead of bools to prepare for passing other flags and
update the documentation of qemu_ram_mmap(). Introduce new QEMU_MAP_ flags
that abstract the mmap() PROT_ and MAP_ flag handling and simplify it.

We expose only flags that are currently supported by qemu_ram_mmap().
Maybe, we'll see qemu_mmap() in the future as well that can implement these
flags.

Note: We don't use MAP_ flags as some flags (e.g., MAP_SYNC) are only
defined for some systems and we want to always be able to identify these
flags reliably inside qemu_ram_mmap() -- for example, to properly warn when
some future flags are not available or effective on a system. Also, this
way we can simplify PROT_ handling as well.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-8-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit b444f5c079 (parent ebef62d0e5)
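To make the API change concrete, here is a minimal before/after sketch of a
call site (illustrative only; fd, size and align stand in for whatever the
caller has at hand):

    /* Before: behavior selected by three positional bools (readonly,
     * shared, is_pmem), which are easy to mix up at the call site. */
    void *area = qemu_ram_mmap(fd, size, align, false, true, false, 0);

    /* After: the caller composes self-documenting QEMU_MAP_* flags. */
    uint32_t qemu_map_flags = QEMU_MAP_SHARED;
    area = qemu_ram_mmap(fd, size, align, qemu_map_flags, 0);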
@@ -7,18 +7,22 @@ size_t qemu_fd_getpagesize(int fd);
 size_t qemu_mempath_getpagesize(const char *mem_path);
 
 /**
- * qemu_ram_mmap: mmap the specified file or device.
+ * qemu_ram_mmap: mmap anonymous memory, the specified file or device.
+ *
+ * mmap() abstraction to map guest RAM, simplifying flag handling, taking
+ * care of alignment requirements and installing guard pages.
  *
  * Parameters:
  *  @fd: the file or the device to mmap
  *  @size: the number of bytes to be mmaped
  *  @align: if not zero, specify the alignment of the starting mapping address;
  *          otherwise, the alignment in use will be determined by QEMU.
- *  @readonly: true for a read-only mapping, false for read/write.
- *  @shared: map has RAM_SHARED flag.
- *  @is_pmem: map has RAM_PMEM flag.
+ *  @qemu_map_flags: QEMU_MAP_* flags
  *  @map_offset: map starts at offset of map_offset from the start of fd
  *
+ * Internally, MAP_PRIVATE, MAP_ANONYMOUS and MAP_SHARED_VALIDATE are set
+ * implicitly based on other parameters.
+ *
  * Return:
  *  On success, return a pointer to the mapped area.
  *  On failure, return MAP_FAILED.
@@ -26,9 +30,7 @@ size_t qemu_mempath_getpagesize(const char *mem_path);
 void *qemu_ram_mmap(int fd,
                     size_t size,
                     size_t align,
-                    bool readonly,
-                    bool shared,
-                    bool is_pmem,
+                    uint32_t qemu_map_flags,
                     off_t map_offset);
 
 void qemu_ram_munmap(int fd, void *ptr, size_t size);
@@ -366,6 +366,24 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared);
 void qemu_vfree(void *ptr);
 void qemu_anon_ram_free(void *ptr, size_t size);
 
+/*
+ * Abstraction of PROT_ and MAP_ flags as passed to mmap(), for example,
+ * consumed by qemu_ram_mmap().
+ */
+
+/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */
+#define QEMU_MAP_READONLY   (1 << 0)
+
+/* Use MAP_SHARED instead of MAP_PRIVATE. */
+#define QEMU_MAP_SHARED     (1 << 1)
+
+/*
+ * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without
+ * QEMU_MAP_SHARED. If mapping fails, warn and fallback to !QEMU_MAP_SYNC.
+ */
+#define QEMU_MAP_SYNC       (1 << 2)
+
+
 #define QEMU_MADV_INVALID -1
 
 #if defined(CONFIG_MADVISE)
@@ -1540,6 +1540,7 @@ static void *file_ram_alloc(RAMBlock *block,
                             off_t offset,
                             Error **errp)
 {
+    uint32_t qemu_map_flags;
     void *area;
 
     block->page_size = qemu_fd_getpagesize(fd);
@@ -1587,9 +1588,10 @@ static void *file_ram_alloc(RAMBlock *block,
         perror("ftruncate");
     }
 
-    area = qemu_ram_mmap(fd, memory, block->mr->align, readonly,
-                         block->flags & RAM_SHARED, block->flags & RAM_PMEM,
-                         offset);
+    qemu_map_flags = readonly ? QEMU_MAP_READONLY : 0;
+    qemu_map_flags |= (block->flags & RAM_SHARED) ? QEMU_MAP_SHARED : 0;
+    qemu_map_flags |= (block->flags & RAM_PMEM) ? QEMU_MAP_SYNC : 0;
+    area = qemu_ram_mmap(fd, memory, block->mr->align, qemu_map_flags, offset);
     if (area == MAP_FAILED) {
         error_setg_errno(errp, errno,
                          "unable to map backing store for guest RAM");
@@ -118,9 +118,12 @@ static void *mmap_reserve(size_t size, int fd)
  * Activate memory in a reserved region from the given fd (if any), to make
  * it accessible.
  */
-static void *mmap_activate(void *ptr, size_t size, int fd, bool readonly,
-                           bool shared, bool is_pmem, off_t map_offset)
+static void *mmap_activate(void *ptr, size_t size, int fd,
+                           uint32_t qemu_map_flags, off_t map_offset)
 {
+    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
+    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
+    const bool sync = qemu_map_flags & QEMU_MAP_SYNC;
     const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
     int map_sync_flags = 0;
     int flags = MAP_FIXED;
@@ -128,7 +131,7 @@ static void *mmap_activate(void *ptr, size_t size, int fd, bool readonly,
 
     flags |= fd == -1 ? MAP_ANONYMOUS : 0;
     flags |= shared ? MAP_SHARED : MAP_PRIVATE;
-    if (shared && is_pmem) {
+    if (shared && sync) {
         map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
     }
 
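The "warn and fallback to !QEMU_MAP_SYNC" behavior promised by the
QEMU_MAP_SYNC comment lives a few lines below this hunk; roughly (a
simplified sketch based on that documented behavior, not the verbatim code):

    ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd, map_offset);
    if (ptr == MAP_FAILED && map_sync_flags) {
        /* Warn, then retry without MAP_SYNC | MAP_SHARED_VALIDATE so hosts
         * or files that cannot honor MAP_SYNC still get a usable mapping. */
        ptr = mmap(ptr, size, prot, flags, fd, map_offset);
    }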
@@ -173,9 +176,7 @@ static inline size_t mmap_guard_pagesize(int fd)
 void *qemu_ram_mmap(int fd,
                     size_t size,
                     size_t align,
-                    bool readonly,
-                    bool shared,
-                    bool is_pmem,
+                    uint32_t qemu_map_flags,
                     off_t map_offset)
 {
     const size_t guard_pagesize = mmap_guard_pagesize(fd);
@@ -199,7 +200,7 @@ void *qemu_ram_mmap(int fd,
 
     offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
 
-    ptr = mmap_activate(guardptr + offset, size, fd, readonly, shared, is_pmem,
+    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                         map_offset);
     if (ptr == MAP_FAILED) {
         munmap(guardptr, total);
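For context on the guard pages and alignment the doc comment advertises:
qemu_ram_mmap() first reserves an oversized PROT_NONE region, then activates
an aligned window inside it, and the PROT_NONE tail left past the window acts
as the guard. A simplified sketch of that scheme (error handling and the
exact reservation sizing omitted):

    /* Reserve more address space than needed, inaccessible (PROT_NONE). */
    void *guardptr = mmap(0, size + align + guard_pagesize, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    /* Activate only an aligned, size-sized window inside the reservation. */
    size_t offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align)
                    - (uintptr_t)guardptr;
    void *ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                              map_offset);
    /* Everything past ptr + size stays PROT_NONE and serves as the guard. */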
@@ -229,8 +229,9 @@ void *qemu_memalign(size_t alignment, size_t size)
 /* alloc shared memory pages */
 void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared)
 {
+    const uint32_t qemu_map_flags = shared ? QEMU_MAP_SHARED : 0;
     size_t align = QEMU_VMALLOC_ALIGN;
-    void *ptr = qemu_ram_mmap(-1, size, align, false, shared, false, 0);
+    void *ptr = qemu_ram_mmap(-1, size, align, qemu_map_flags, 0);
 
     if (ptr == MAP_FAILED) {
         return NULL;
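With fd == -1 the same entry point now covers anonymous memory cleanly; a
usage sketch mirroring the hunk above (illustrative only):

    /* Anonymous, private, read/write RAM: no flags needed. */
    void *priv = qemu_ram_mmap(-1, size, QEMU_VMALLOC_ALIGN, 0, 0);

    /* Anonymous shared RAM, as qemu_anon_ram_alloc(..., shared=true) does. */
    void *shrd = qemu_ram_mmap(-1, size, QEMU_VMALLOC_ALIGN,
                               QEMU_MAP_SHARED, 0);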