8dbe22c686
Let's introduce RAM_NORESERVE, allowing mmap'ing with MAP_NORESERVE. The
new flag has the following semantics:

    "
    RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or
    huge pages if applicable) is skipped: will bail out if not supported.
    When not set, the OS will do the reservation, if supported for the
    memory type.
    "

Allow passing it into:
- memory_region_init_ram_nomigrate()
- memory_region_init_resizeable_ram()
- memory_region_init_ram_from_file()

... and teach qemu_ram_mmap() and qemu_anon_ram_alloc() about the flag.
Bail out if the flag is not supported, which is the case right now for
both, POSIX and win32. We will add Linux support next and allow
specifying RAM_NORESERVE via memory backends.

The target use case is virtio-mem, which dynamically exposes memory
inside a large, sparse memory area to the VM.

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-9-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
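For context, the effect of MAP_NORESERVE is easiest to see at the plain
mmap() level, outside of QEMU's helpers. The following is a minimal,
hypothetical POSIX sketch (not part of this commit; MAP_NORESERVE itself
is a Linux extension) of allocating a large sparse anonymous mapping
without swap-space accounting, which is the behavior qemu_ram_mmap()
gains for RAM_NORESERVE once host support is wired up in the follow-up
patches:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    /* 16 GiB of address space (assumes a 64-bit host); pages consume
     * memory only once touched, and MAP_NORESERVE additionally skips
     * swap-space reservation for the whole area. */
    size_t size = (size_t)16 << 30;
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    ((char *)p)[0] = 1;    /* touch a single page; the rest stays unpopulated */
    munmap(p, size);
    return 0;
}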
#ifndef CPU_COMMON_H
#define CPU_COMMON_H

/* CPU interfaces that are target independent. */

#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);

void tcg_flush_softmmu_tlb(CPUState *cs);

void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if defined(HOST_WORDS_BIGENDIAN)
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif

/* memory API */

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_noreserve(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);
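/*
 * Illustrative sketch, not part of this header: resolve a host pointer back
 * to its RAMBlock and query a few properties, including the new
 * RAM_NORESERVE state, using only the accessors declared above. The function
 * name and the assumption that "host_ptr" points into guest RAM are made up
 * for the example; a real caller would include qemu/osdep.h.
 */
static void dump_ram_block_info(void *host_ptr)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host_ptr, false, &offset);

    if (!rb) {
        return; /* pointer does not belong to any RAMBlock */
    }
    printf("block %s: offset 0x" RAM_ADDR_FMT ", used 0x" RAM_ADDR_FMT
           ", page size %zu, shared=%d, noreserve=%d\n",
           qemu_ram_get_idstr(rb), offset, qemu_ram_get_used_length(rb),
           qemu_ram_pagesize(rb), qemu_ram_is_shared(rb),
           qemu_ram_is_noreserve(rb));
}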
void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
void cpu_register_map_client(QEMUBH *bh);
void cpu_unregister_map_client(QEMUBH *bh);
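/*
 * Illustrative sketch, not part of this header: typical map/unmap usage for
 * reading guest memory. cpu_physical_memory_map() may shrink *plen, so only
 * the returned length is accessed and the same value is reported back as
 * access_len on unmap. The helper name is made up for the example.
 */
static void copy_from_guest(hwaddr addr, void *dst, hwaddr len)
{
    hwaddr mapped_len = len;
    void *p = cpu_physical_memory_map(addr, &mapped_len, false);

    if (p) {
        /* Copies only the first mapped_len bytes; a real caller would loop
         * or fall back for any remainder. */
        memcpy(dst, p, mapped_len);
        cpu_physical_memory_unmap(p, mapped_len, false, mapped_len);
    } else {
        /* Mapping can fail (e.g. bounce buffer in use); use the slow path. */
        cpu_physical_memory_read(addr, dst, len);
    }
}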
bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);
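/*
 * Illustrative sketch, not part of this header: a device whose MMIO writes
 * are side-effect free can opt in to coalescing when it creates its region.
 * memory_region_init_io() and memory_region_set_coalescing() are declared in
 * "exec/memory.h"; the device state type, ops table and region name below
 * are hypothetical.
 */
static void example_device_init_mmio(ExampleDeviceState *s)
{
    memory_region_init_io(&s->mmio, OBJECT(s), &example_mmio_ops, s,
                          "example-mmio", 0x1000);
    /* Writes may now be buffered and replayed later; the buffered writes
     * become visible when the coalesced MMIO buffer is flushed (see
     * qemu_flush_coalesced_mmio_buffer() above). */
    memory_region_set_coalescing(&s->mmio);
}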
void cpu_flush_icache_range(hwaddr start, hwaddr len);

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
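/*
 * Illustrative sketch, not part of this header: walking all RAM blocks with
 * qemu_ram_foreach_block(). The callback returns 0 to continue; a non-zero
 * return value stops the iteration. The callback and helper names are made
 * up for the example.
 */
static int add_used_length_cb(RAMBlock *rb, void *opaque)
{
    uint64_t *total = opaque;

    *total += qemu_ram_get_used_length(rb);
    return 0;
}

static uint64_t total_used_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(add_used_length_cb, &total);
    return total;
}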
#endif

/* vl.c */
extern int singlestep;

#endif /* CPU_COMMON_H */