Replace qemu_real_host_page variables with inlined functions

Replace the global variables with inlined helper functions. getpagesize() is very
likely annotated with a "const" function attribute (at least with glibc), so the
compiler can optimize repeated calls just as well as a cached global.

This also removes the need for constructor-time initialization.

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20220323155743.1585078-12-marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Marc-André Lureau <marcandre.lureau@redhat.com>
Date: 2022-03-23 19:57:22 +04:00
Committed-by: Paolo Bonzini
commit 8e3b0cbb72
parent b307e5052d
53 changed files with 150 additions and 162 deletions
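
The pattern of the change is mechanical: every read of the former globals gains a pair of parentheses, and the helpers themselves (added to qemu/osdep.h in one of the hunks below) are one-line inline functions. A minimal standalone sketch, not part of the patch, of why the commit message expects this to optimize well, assuming glibc's "const"-annotated getpagesize() declaration:

/* sketch.c - illustrates the "const" function attribute.
 * glibc declares getpagesize() roughly as
 *     extern int getpagesize(void) __attribute__((__const__));
 * so the compiler may assume the result never changes and merge calls.
 */
#include <stdint.h>
#include <unistd.h>

/* Hypothetical stand-in mirroring the patch's qemu_real_host_page_size(). */
static inline uintptr_t host_page_size(void)
{
    return getpagesize();
}

uintptr_t host_page_align(uintptr_t addr)
{
    /* Two calls in the source, typically one getpagesize() call at -O2. */
    return (addr + host_page_size() - 1) & -host_page_size();
}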


@@ -122,7 +122,7 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
     MemoryRegion *area = section->mr;
     bool writeable = !area->readonly && !area->rom_device;
     hv_memory_flags_t flags;
-    uint64_t page_size = qemu_real_host_page_size;
+    uint64_t page_size = qemu_real_host_page_size();
     if (!memory_region_is_ram(area)) {
         if (writeable) {


@@ -59,7 +59,7 @@
 #ifdef PAGE_SIZE
 #undef PAGE_SIZE
 #endif
-#define PAGE_SIZE qemu_real_host_page_size
+#define PAGE_SIZE qemu_real_host_page_size()
 #ifndef KVM_GUESTDBG_BLOCKIRQ
 #define KVM_GUESTDBG_BLOCKIRQ 0
@@ -324,14 +324,14 @@ static hwaddr kvm_align_section(MemoryRegionSection *section,
        with sub-page size and unaligned start address. Pad the start
        address to next and truncate size to previous page boundary. */
     aligned = ROUND_UP(section->offset_within_address_space,
-                       qemu_real_host_page_size);
+                       qemu_real_host_page_size());
     delta = aligned - section->offset_within_address_space;
     *start = aligned;
     if (delta > size) {
         return 0;
     }
-    return (size - delta) & qemu_real_host_page_mask;
+    return (size - delta) & qemu_real_host_page_mask();
 }
 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
@@ -626,7 +626,7 @@ static void kvm_log_stop(MemoryListener *listener,
 static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
 {
     ram_addr_t start = slot->ram_start_offset;
-    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
+    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
     cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
 }
@@ -662,7 +662,7 @@ static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
      * And mem->memory_size is aligned to it (otherwise this mem can't
      * be registered to KVM).
      */
-    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
+    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                                /*HOST_LONG_BITS*/ 64) / 8;
     mem->dirty_bmap = g_malloc0(bitmap_size);
     mem->dirty_bmap_size = bitmap_size;
@@ -707,7 +707,7 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
     mem = &kml->slots[slot_id];
     if (!mem->memory_size || offset >=
-        (mem->memory_size / qemu_real_host_page_size)) {
+        (mem->memory_size / qemu_real_host_page_size())) {
         return;
     }
@@ -895,7 +895,7 @@ static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
 #define KVM_CLEAR_LOG_SHIFT 6
-#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
+#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
 static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
@@ -904,7 +904,7 @@ static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
     KVMState *s = kvm_state;
     uint64_t end, bmap_start, start_delta, bmap_npages;
     struct kvm_clear_dirty_log d;
-    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
+    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
     int ret;
     /*
@@ -1335,7 +1335,7 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
 void kvm_set_max_memslot_size(hwaddr max_slot_size)
 {
     g_assert(
-        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
+        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
     );
     kvm_max_slot_size = max_slot_size;
 }
@@ -2341,7 +2341,7 @@ static int kvm_init(MachineState *ms)
      * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
      * page size for the system though.
      */
-    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
+    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
     s->sigmask_len = 8;


@@ -319,7 +319,7 @@ size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
 #else
 size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
 {
-    return qemu_real_host_page_size;
+    return qemu_real_host_page_size();
 }
 #endif


@@ -135,7 +135,7 @@ size_t bdrv_opt_mem_align(BlockDriverState *bs)
 {
     if (!bs || !bs->drv) {
         /* page size or 4k (hdd sector size) should be on the safe side */
-        return MAX(4096, qemu_real_host_page_size);
+        return MAX(4096, qemu_real_host_page_size());
     }
     IO_CODE();
@@ -146,7 +146,7 @@ size_t bdrv_min_mem_align(BlockDriverState *bs)
 {
     if (!bs || !bs->drv) {
         /* page size or 4k (hdd sector size) should be on the safe side */
-        return MAX(4096, qemu_real_host_page_size);
+        return MAX(4096, qemu_real_host_page_size());
     }
     IO_CODE();


@@ -386,7 +386,7 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
 {
     BDRVRawState *s = bs->opaque;
     char *buf;
-    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
+    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
     size_t alignments[] = {1, 512, 1024, 2048, 4096};
     /* For SCSI generic devices the alignment is not really used.
@@ -1261,7 +1261,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
         raw_probe_alignment(bs, s->fd, errp);
         bs->bl.min_mem_alignment = s->buf_align;
-        bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size);
+        bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());
     /*
      * Maximum transfers are best effort, so it is okay to ignore any
@@ -1886,7 +1886,7 @@ static int allocate_first_block(int fd, size_t max_size)
     size_t write_size = (max_size < MAX_BLOCKSIZE)
                         ? BDRV_SECTOR_SIZE
                         : MAX_BLOCKSIZE;
-    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
+    size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
     void *buf;
     ssize_t n;
     int ret;


@@ -201,7 +201,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
     if (!have_limits) {
         bs->bl.min_mem_alignment = 512;
-        bs->bl.opt_mem_alignment = qemu_real_host_page_size;
+        bs->bl.opt_mem_alignment = qemu_real_host_page_size();
         /* Safe default since most protocols use readv()/writev()/etc */
         bs->bl.max_iov = IOV_MAX;


@@ -169,9 +169,9 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
     size_t bytes;
     int r;
-    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
+    bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
     q->head = q->tail = 0;
-    q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
+    q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
     if (!q->queue) {
         error_setg(errp, "Cannot allocate queue");
         return false;
@@ -232,8 +232,8 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     trace_nvme_create_queue_pair(idx, q, size, aio_context,
                                  event_notifier_get_fd(s->irq_notifier));
     bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
-                          qemu_real_host_page_size);
-    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
+                          qemu_real_host_page_size());
+    q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
     if (!q->prp_list_pages) {
         error_setg(errp, "Cannot allocate PRP page list");
         goto fail;
@@ -533,9 +533,9 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
         .opcode = NVME_ADM_CMD_IDENTIFY,
         .cdw10 = cpu_to_le32(0x1),
     };
-    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);
-    id = qemu_try_memalign(qemu_real_host_page_size, id_size);
+    size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());
+    id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
     if (!id) {
         error_setg(errp, "Cannot allocate buffer for identify response");
         goto out;
@@ -1048,7 +1048,7 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
         bool retry = true;
         uint64_t iova;
         size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
-                                   qemu_real_host_page_size);
+                                   qemu_real_host_page_size());
 try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
@@ -1224,8 +1224,8 @@ static inline bool nvme_qiov_aligned(BlockDriverState *bs,
     for (i = 0; i < qiov->niov; ++i) {
         if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
-                                 qemu_real_host_page_size) ||
-            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
+                                 qemu_real_host_page_size()) ||
+            !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
             trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
                                       qiov->iov[i].iov_len, s->page_size);
             return false;
@@ -1241,7 +1241,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     int r;
     QEMU_AUTO_VFREE uint8_t *buf = NULL;
     QEMUIOVector local_qiov;
-    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
+    size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
     assert(QEMU_IS_ALIGNED(offset, s->page_size));
     assert(QEMU_IS_ALIGNED(bytes, s->page_size));
     assert(bytes <= s->max_transfer);
@@ -1251,7 +1251,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
     }
     s->stats.unaligned_accesses++;
     trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
-    buf = qemu_try_memalign(qemu_real_host_page_size, len);
+    buf = qemu_try_memalign(qemu_real_host_page_size(), len);
     if (!buf) {
         return -ENOMEM;


@@ -870,7 +870,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
         }
     }
-    s->bat_dirty_block = 4 * qemu_real_host_page_size;
+    s->bat_dirty_block = 4 * qemu_real_host_page_size();
     s->bat_dirty_bmap =
         bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));


@@ -75,7 +75,7 @@ static void qcow2_cache_table_release(Qcow2Cache *c, int i, int num_tables)
 /* Using MADV_DONTNEED to discard memory is a Linux-specific feature */
 #ifdef CONFIG_LINUX
     void *t = qcow2_cache_get_table_addr(c, i);
-    int align = qemu_real_host_page_size;
+    int align = qemu_real_host_page_size();
     size_t mem_size = (size_t) c->table_size * num_tables;
     size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
     size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);


@@ -246,7 +246,7 @@ static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
      * patch target_mmap(), but it is more complicated as the file
      * size must be known.
      */
-    if (qemu_real_host_page_size < qemu_host_page_size) {
+    if (qemu_real_host_page_size() < qemu_host_page_size) {
         abi_ulong end_addr, end_addr1;
         end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
         end_addr = HOST_PAGE_ALIGN(elf_bss);


@@ -515,7 +515,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
      * up to the targets page boundary.
      */
-    if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
+    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
         struct stat sb;
         if (fstat(fd, &sb) == -1) {


@@ -53,7 +53,7 @@ struct udmabuf_create {
 static size_t
 udmabuf_get_size(struct vugbm_buffer *buf)
 {
-    return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
+    return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size());
 }
 static bool
static bool static bool

cpu.c

@@ -481,7 +481,7 @@ void page_size_init(void)
     /* NOTE: we can always suppose that qemu_host_page_size >=
        TARGET_PAGE_SIZE */
     if (qemu_host_page_size == 0) {
-        qemu_host_page_size = qemu_real_host_page_size;
+        qemu_host_page_size = qemu_real_host_page_size();
     }
     if (qemu_host_page_size < TARGET_PAGE_SIZE) {
         qemu_host_page_size = TARGET_PAGE_SIZE;


@@ -320,7 +320,7 @@ static ram_addr_t qxl_rom_size(void)
 #define QXL_ROM_SZ 8192
     QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ);
-    return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size);
+    return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size());
 }
 static void init_qxl_rom(PCIQXLDevice *d)


@@ -24,7 +24,7 @@
 #include "trace.h"
 #include "qom/object.h"
-#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size
+#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size()
 #define FLIC_FAILED (-1UL)
 #define FLIC_SAVEVM_VERSION 1


@@ -622,9 +622,9 @@ static bool fw_cfg_acpi_mr_restore(void *opaque)
     FWCfgState *s = opaque;
     bool mr_aligned;
-    mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size) &&
-                 QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size) &&
-                 QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size);
+    mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) &&
+                 QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) &&
+                 QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size());
     return s->acpi_mr_restore && !mr_aligned;
 }


@@ -456,7 +456,7 @@ static void ppc_core99_init(MachineState *machine)
     }
     /* The NewWorld NVRAM is not located in the MacIO device */
-    if (kvm_enabled() && qemu_real_host_page_size > 4096) {
+    if (kvm_enabled() && qemu_real_host_page_size() > 4096) {
         /* We can't combine read-write and read-only in a single page, so
            move the NVRAM out of ROM again for KVM */
         nvram_addr = 0xFFE00000;


@@ -1978,7 +1978,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
      * our memory slot is of page size granularity.
      */
     if (kvm_enabled()) {
-        msi_window_size = qemu_real_host_page_size;
+        msi_window_size = qemu_real_host_page_size();
     }
     memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,


@@ -608,7 +608,7 @@ static void pvrdma_realize(PCIDevice *pdev, Error **errp)
     rdma_info_report("Initializing device %s %x.%x", pdev->name,
                      PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-    if (TARGET_PAGE_SIZE != qemu_real_host_page_size) {
+    if (TARGET_PAGE_SIZE != qemu_real_host_page_size()) {
         error_setg(errp, "Target page size must be the same as host page size");
         return;
     }


@@ -183,7 +183,7 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
         uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);
         assert(max_transfer);
-        max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size)
+        max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size())
             / s->blocksize;
         stl_be_p(&r->buf[8], max_transfer);
         /* Also take care of the opt xfer len. */


@@ -47,7 +47,7 @@ void tpm_ppi_reset(TPMPPI *tpmppi)
 void tpm_ppi_init(TPMPPI *tpmppi, MemoryRegion *m,
                   hwaddr addr, Object *obj)
 {
-    tpmppi->buf = qemu_memalign(qemu_real_host_page_size,
+    tpmppi->buf = qemu_memalign(qemu_real_host_page_size(),
                                 HOST_PAGE_ALIGN(TPM_PPI_ADDR_SIZE));
     memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi",
                                       TPM_PPI_ADDR_SIZE, tpmppi->buf);


@@ -397,7 +397,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
 {
     struct vfio_iommu_type1_dma_unmap *unmap;
     struct vfio_bitmap *bitmap;
-    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
+    uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
     int ret;
     unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
@@ -414,7 +414,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
      * to qemu_real_host_page_size.
      */
-    bitmap->pgsize = qemu_real_host_page_size;
+    bitmap->pgsize = qemu_real_host_page_size();
     bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                    BITS_PER_BYTE;
@@ -882,8 +882,8 @@ static void vfio_listener_region_add(MemoryListener *listener,
     }
     if (unlikely((section->offset_within_address_space &
-                  ~qemu_real_host_page_mask) !=
-                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
+                  ~qemu_real_host_page_mask()) !=
+                 (section->offset_within_region & ~qemu_real_host_page_mask()))) {
         error_report("%s received unaligned region", __func__);
         return;
     }
@@ -891,7 +891,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
     iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
-    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
+    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
     if (int128_ge(int128_make64(iova), llend)) {
         if (memory_region_is_ram_device(section->mr)) {
@@ -899,7 +899,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
                          memory_region_name(section->mr),
                          section->offset_within_address_space,
                          int128_getlo(section->size),
-                         qemu_real_host_page_size);
+                         qemu_real_host_page_size());
         }
         return;
     }
@@ -1118,8 +1118,8 @@ static void vfio_listener_region_del(MemoryListener *listener,
     }
     if (unlikely((section->offset_within_address_space &
-                  ~qemu_real_host_page_mask) !=
-                 (section->offset_within_region & ~qemu_real_host_page_mask))) {
+                  ~qemu_real_host_page_mask()) !=
+                 (section->offset_within_region & ~qemu_real_host_page_mask()))) {
         error_report("%s received unaligned region", __func__);
         return;
     }
@@ -1150,7 +1150,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
     iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
     llend = int128_make64(section->offset_within_address_space);
     llend = int128_add(llend, section->size);
-    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
+    llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
     if (int128_ge(int128_make64(iova), llend)) {
         return;
@@ -1272,9 +1272,9 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
      * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
      * to qemu_real_host_page_size.
      */
-    range->bitmap.pgsize = qemu_real_host_page_size;
-    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
+    range->bitmap.pgsize = qemu_real_host_page_size();
+    pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size();
     range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
                          BITS_PER_BYTE;
     range->bitmap.data = g_try_malloc0(range->bitmap.size);
@@ -1970,7 +1970,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
      * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
      * qemu_real_host_page_size to mark those dirty.
      */
-    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
+    if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
         container->dirty_pages_supported = true;
         container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
         container->dirty_pgsizes = cap_mig->pgsize_bitmap;


@@ -1087,8 +1087,8 @@ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
     /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
     if (bar_addr != PCI_BAR_UNMAPPED &&
-        !(bar_addr & ~qemu_real_host_page_mask)) {
-        size = qemu_real_host_page_size;
+        !(bar_addr & ~qemu_real_host_page_mask())) {
+        size = qemu_real_host_page_size();
     }
     memory_region_transaction_begin();
@@ -1204,7 +1204,7 @@ void vfio_pci_write_config(PCIDevice *pdev,
     for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
         if (old_addr[bar] != pdev->io_regions[bar].addr &&
             vdev->bars[bar].region.size > 0 &&
-            vdev->bars[bar].region.size < qemu_real_host_page_size) {
+            vdev->bars[bar].region.size < qemu_real_host_page_size()) {
             vfio_sub_page_bar_update_mapping(pdev, bar);
         }
     }
@@ -1292,7 +1292,7 @@ static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
     }
     /* MSI-X table start and end aligned to host page size */
-    start = vdev->msix->table_offset & qemu_real_host_page_mask;
+    start = vdev->msix->table_offset & qemu_real_host_page_mask();
     end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
                                (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
@@ -2478,7 +2478,7 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f)
      */
     if (old_addr[bar] != pdev->io_regions[bar].addr &&
         vdev->bars[bar].region.size > 0 &&
-        vdev->bars[bar].region.size < qemu_real_host_page_size) {
+        vdev->bars[bar].region.size < qemu_real_host_page_size()) {
         vfio_sub_page_bar_update_mapping(pdev, bar);
     }
 }


@@ -44,7 +44,7 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener,
     const hwaddr gpa = section->offset_within_address_space;
     hwaddr end;
     int ret;
-    hwaddr page_mask = qemu_real_host_page_mask;
+    hwaddr page_mask = qemu_real_host_page_mask();
     struct vfio_iommu_spapr_register_memory reg = {
         .argsz = sizeof(reg),
         .flags = 0,
@@ -102,7 +102,7 @@ static void vfio_prereg_listener_region_del(MemoryListener *listener,
     const hwaddr gpa = section->offset_within_address_space;
     hwaddr end;
     int ret;
-    hwaddr page_mask = qemu_real_host_page_mask;
+    hwaddr page_mask = qemu_real_host_page_mask();
     struct vfio_iommu_spapr_register_memory reg = {
         .argsz = sizeof(reg),
         .flags = 0,
@@ -199,12 +199,12 @@ int vfio_spapr_create_window(VFIOContainer *container,
      * Below we look at qemu_real_host_page_size as TCEs are allocated from
      * system pages.
      */
-    bits_per_level = ctz64(qemu_real_host_page_size) + 8;
+    bits_per_level = ctz64(qemu_real_host_page_size()) + 8;
     create.levels = bits_total / bits_per_level;
     if (bits_total % bits_per_level) {
         ++create.levels;
     }
-    max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size);
+    max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size());
     for ( ; create.levels <= max_levels; ++create.levels) {
         ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
         if (!ret) {


@@ -11,7 +11,7 @@
 #include "qemu/iova-tree.h"
 #include "vhost-iova-tree.h"
-#define iova_min_addr qemu_real_host_page_size
+#define iova_min_addr qemu_real_host_page_size()
 /**
  * VhostIOVATree, able to:
@@ -86,7 +86,7 @@ const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree,
 int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map)
 {
     /* Some vhost devices do not like addr 0. Skip first page */
-    hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size;
+    hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size();
     if (map->translated_addr + map->size < map->translated_addr ||
         map->perm == IOMMU_NONE) {


@@ -471,14 +471,14 @@ size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
     size_t avail_size = offsetof(vring_avail_t, ring) +
                         sizeof(uint16_t) * svq->vring.num;
-    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size);
+    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
 }
 size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
 {
     size_t used_size = offsetof(vring_used_t, ring) +
                        sizeof(vring_used_elem_t) * svq->vring.num;
-    return ROUND_UP(used_size, qemu_real_host_page_size);
+    return ROUND_UP(used_size, qemu_real_host_page_size());
 }
 /**
@@ -533,11 +533,11 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
     driver_size = vhost_svq_driver_area_size(svq);
     device_size = vhost_svq_device_area_size(svq);
-    svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
+    svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
     desc_size = sizeof(vring_desc_t) * svq->vring.num;
     svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
     memset(svq->vring.desc, 0, driver_size);
-    svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
+    svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
     memset(svq->vring.used, 0, device_size);
     svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {


@@ -1166,7 +1166,7 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
 static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
 {
     assert(n && n->unmap_addr);
-    munmap(n->unmap_addr, qemu_real_host_page_size);
+    munmap(n->unmap_addr, qemu_real_host_page_size());
     n->unmap_addr = NULL;
 }
@@ -1503,7 +1503,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                        int fd)
 {
     int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
-    size_t page_size = qemu_real_host_page_size;
+    size_t page_size = qemu_real_host_page_size();
     struct vhost_user *u = dev->opaque;
     VhostUserState *user = u->user;
     VirtIODevice *vdev = dev->vdev;


@@ -468,7 +468,7 @@ err:
 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                             int queue_index)
 {
-    size_t page_size = qemu_real_host_page_size;
+    size_t page_size = qemu_real_host_page_size();
     struct vhost_vdpa *v = dev->opaque;
     VirtIODevice *vdev = dev->vdev;
     VhostVDPAHostNotifier *n;
@@ -485,7 +485,7 @@ static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
 {
-    size_t page_size = qemu_real_host_page_size;
+    size_t page_size = qemu_real_host_page_size();
     struct vhost_vdpa *v = dev->opaque;
     VirtIODevice *vdev = dev->vdev;
     VhostVDPAHostNotifier *n;
@@ -875,7 +875,7 @@ static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
         return false;
     }
-    size = ROUND_UP(result->size, qemu_real_host_page_size);
+    size = ROUND_UP(result->size, qemu_real_host_page_size());
     r = vhost_vdpa_dma_unmap(v, result->iova, size);
     return r == 0;
 }


@@ -53,11 +53,11 @@ static uint32_t virtio_mem_default_thp_size(void)
 #if defined(__x86_64__) || defined(__arm__) || defined(__powerpc64__)
     default_thp_size = 2 * MiB;
 #elif defined(__aarch64__)
-    if (qemu_real_host_page_size == 4 * KiB) {
+    if (qemu_real_host_page_size() == 4 * KiB) {
         default_thp_size = 2 * MiB;
-    } else if (qemu_real_host_page_size == 16 * KiB) {
+    } else if (qemu_real_host_page_size() == 16 * KiB) {
         default_thp_size = 32 * MiB;
-    } else if (qemu_real_host_page_size == 64 * KiB) {
+    } else if (qemu_real_host_page_size() == 64 * KiB) {
         default_thp_size = 512 * MiB;
     }
 #endif
@@ -120,7 +120,7 @@ static uint64_t virtio_mem_default_block_size(RAMBlock *rb)
     const uint64_t page_size = qemu_ram_pagesize(rb);
     /* We can have hugetlbfs with a page size smaller than the THP size. */
-    if (page_size == qemu_real_host_page_size) {
+    if (page_size == qemu_real_host_page_size()) {
         return MAX(page_size, virtio_mem_thp_size());
     }
     return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE);
@@ -135,7 +135,7 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb)
      * fresh page, consuming actual memory.
      */
     return !qemu_ram_is_shared(rb) && rb->fd < 0 &&
-           qemu_ram_pagesize(rb) == qemu_real_host_page_size;
+           qemu_ram_pagesize(rb) == qemu_real_host_page_size();
 }
 #endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */


@@ -26,7 +26,7 @@ extern uintptr_t qemu_host_page_size;
 extern intptr_t qemu_host_page_mask;
 #define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
-#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
 /* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
 void qemu_init_cpu_list(void);


@@ -343,7 +343,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
     hwaddr addr;
     ram_addr_t ram_addr;
     unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
-    unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
+    unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
     unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
     /* start address is aligned at the start of a word? */


@@ -431,9 +431,9 @@ extern int madvise(char *, size_t, int);
     /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
 #  define QEMU_VMALLOC_ALIGN (256 * 4096)
 #elif defined(__linux__) && defined(__sparc__)
-#  define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size, SHMLBA)
+#  define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size(), SHMLBA)
 #else
-#  define QEMU_VMALLOC_ALIGN qemu_real_host_page_size
+#  define QEMU_VMALLOC_ALIGN qemu_real_host_page_size()
 #endif
 #ifdef CONFIG_POSIX
@@ -590,8 +590,15 @@ pid_t qemu_fork(Error **errp);
 /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
  * when intptr_t is 32-bit and we are aligning a long long.
  */
-extern uintptr_t qemu_real_host_page_size;
-extern intptr_t qemu_real_host_page_mask;
+static inline uintptr_t qemu_real_host_page_size(void)
+{
+    return getpagesize();
+}
+static inline intptr_t qemu_real_host_page_mask(void)
+{
+    return -(intptr_t)qemu_real_host_page_size();
+}
 /*
  * After using getopt or getopt_long, if you need to parse another set
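
One design note on the hunk above: qemu_real_host_page_mask() is derived by negating the size rather than stored separately. Since the page size is a power of two, -(intptr_t)size is an all-ones mask over the page-number bits, which is exactly what REAL_HOST_PAGE_ALIGN() relies on. A small standalone illustration, assuming a 4 KiB host page for the concrete values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t size = 4096;            /* assumed host page size */
    intptr_t mask = -(intptr_t)size;  /* ...fffff000 */

    assert((0x12345 & mask) == 0x12000);              /* round down */
    assert(((0x12345 + size - 1) & mask) == 0x13000); /* ROUND_UP equivalent */
    return 0;
}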


@@ -1916,8 +1916,8 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
         size = STACK_LOWER_LIMIT;
     }
     guard = TARGET_PAGE_SIZE;
-    if (guard < qemu_real_host_page_size) {
-        guard = qemu_real_host_page_size;
+    if (guard < qemu_real_host_page_size()) {
+        guard = qemu_real_host_page_size();
     }
     error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,


@@ -494,7 +494,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
        may need to truncate file maps at EOF and add extra anonymous pages
        up to the targets page boundary. */
-    if ((qemu_real_host_page_size < qemu_host_page_size) &&
+    if ((qemu_real_host_page_size() < qemu_host_page_size) &&
         !(flags & MAP_ANONYMOUS)) {
         struct stat sb;


@@ -2652,7 +2652,7 @@ static struct rp_cmd_args {
 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                         ram_addr_t start, size_t len)
 {
-    long our_host_ps = qemu_real_host_page_size;
+    long our_host_ps = qemu_real_host_page_size();
     trace_migrate_handle_rp_req_pages(rbname, start, len);


@@ -319,7 +319,7 @@ static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis)
         return false;
     }
-    if (qemu_real_host_page_size != ram_pagesize_summary()) {
+    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
         bool have_hp = false;
         /* We've got a huge page */
 #ifdef UFFD_FEATURE_MISSING_HUGETLBFS
@@ -357,7 +357,7 @@ static int test_ramblock_postcopiable(RAMBlock *rb, void *opaque)
  */
 bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
 {
-    long pagesize = qemu_real_host_page_size;
+    long pagesize = qemu_real_host_page_size();
     int ufd = -1;
     bool ret = false; /* Error unless we change it */
     void *testarea = NULL;


@@ -720,7 +720,7 @@ static uint64_t vtop(void *ptr, Error **errp)
     uint64_t pinfo;
     uint64_t ret = -1;
    uintptr_t addr = (uintptr_t) ptr;
-    uintptr_t pagesize = qemu_real_host_page_size;
+    uintptr_t pagesize = qemu_real_host_page_size();
     off_t offset = addr / pagesize * sizeof(pinfo);
     int fd;


@@ -2974,10 +2974,10 @@ sub process {
             ERROR("use memset() instead of bzero()\n" . $herecurr);
         }
         if ($line =~ /\bgetpagesize\(\)/) {
-            ERROR("use qemu_real_host_page_size instead of getpagesize()\n" . $herecurr);
+            ERROR("use qemu_real_host_page_size() instead of getpagesize()\n" . $herecurr);
         }
         if ($line =~ /\bsysconf\(_SC_PAGESIZE\)/) {
-            ERROR("use qemu_real_host_page_size instead of sysconf(_SC_PAGESIZE)\n" . $herecurr);
+            ERROR("use qemu_real_host_page_size() instead of sysconf(_SC_PAGESIZE)\n" . $herecurr);
         }
         my $non_exit_glib_asserts = qr{g_assert_cmpstr|
                                        g_assert_cmpint|


@@ -1383,11 +1383,11 @@ long qemu_maxrampagesize(void)
 #else
 long qemu_minrampagesize(void)
 {
-    return qemu_real_host_page_size;
+    return qemu_real_host_page_size();
 }
 long qemu_maxrampagesize(void)
 {
-    return qemu_real_host_page_size;
+    return qemu_real_host_page_size();
 }
 #endif
@@ -2163,7 +2163,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
     new_block->max_length = max_size;
     assert(max_size >= size);
     new_block->fd = -1;
-    new_block->page_size = qemu_real_host_page_size;
+    new_block->page_size = qemu_real_host_page_size();
     new_block->host = host;
     new_block->flags = ram_flags;
     ram_block_add(new_block, &local_err);


@@ -188,15 +188,15 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
     /* Adjust start_pa and size so that they are page-aligned. (Cf
      * kvm_set_phys_mem() in kvm-all.c).
      */
-    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
-    delta &= ~qemu_real_host_page_mask;
+    delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
+    delta &= ~qemu_real_host_page_mask();
     if (delta > size) {
         return;
     }
     start_pa += delta;
     size -= delta;
-    size &= qemu_real_host_page_mask;
-    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+    size &= qemu_real_host_page_mask();
+    if (!size || (start_pa & ~qemu_real_host_page_mask())) {
         return;
     }
@@ -214,7 +214,7 @@ static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
      * call into the kernel. Instead, we split the mapping into smaller ones,
      * and call hax_update_mapping() on each.
      */
-    max_mapping_size = UINT32_MAX & qemu_real_host_page_mask;
+    max_mapping_size = UINT32_MAX & qemu_real_host_page_mask();
     while (size > max_mapping_size) {
         hax_update_mapping(start_pa, max_mapping_size, host_va, flags);
         start_pa += max_mapping_size;


@@ -1075,15 +1075,15 @@ nvmm_process_section(MemoryRegionSection *section, int add)
     }
     /* Adjust start_pa and size so that they are page-aligned. */
-    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
-    delta &= ~qemu_real_host_page_mask;
+    delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
+    delta &= ~qemu_real_host_page_mask();
     if (delta > size) {
         return;
     }
     start_pa += delta;
     size -= delta;
-    size &= qemu_real_host_page_mask;
-    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+    size &= qemu_real_host_page_mask();
+    if (!size || (start_pa & ~qemu_real_host_page_mask())) {
         return;
     }


@@ -1572,15 +1572,15 @@ static void whpx_process_section(MemoryRegionSection *section, int add)
         return;
     }
-    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
-    delta &= ~qemu_real_host_page_mask;
+    delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
+    delta &= ~qemu_real_host_page_mask();
     if (delta > size) {
         return;
     }
     start_pa += delta;
     size -= delta;
-    size &= qemu_real_host_page_mask;
-    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+    size &= qemu_real_host_page_mask();
+    if (!size || (start_pa & ~qemu_real_host_page_mask())) {
         return;
     }


@@ -418,7 +418,7 @@ void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
          * will be a normal mapping, not a special hugepage one used
          * for RAM.
          */
-        if (qemu_real_host_page_size < 0x10000) {
+        if (qemu_real_host_page_size() < 0x10000) {
             error_setg(errp,
                        "KVM can't supply 64kiB CI pages, which guest expects");
         }


@@ -488,14 +488,14 @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
     /* page-align the beginning and end of the buffer */
     buf = static_code_gen_buffer;
     end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
-    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
-    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
+    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size());
+    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size());
     size = end - buf;
     /* Honor a command-line option limiting the size of the buffer. */
     if (size > tb_size) {
-        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
+        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size());
     }
     region.start_aligned = buf;
@@ -729,7 +729,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
  */
 void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
 {
-    const size_t page_size = qemu_real_host_page_size;
+    const size_t page_size = qemu_real_host_page_size();
     size_t region_size;
     int have_prot, need_prot;


@@ -468,8 +468,8 @@ vubr_queue_set_started(VuDev *dev, int qidx, bool started)
     if (started && vubr->notifier.fd >= 0) {
         vu_set_queue_host_notifier(dev, vq, vubr->notifier.fd,
-                                   qemu_real_host_page_size,
-                                   qidx * qemu_real_host_page_size);
+                                   qemu_real_host_page_size(),
+                                   qidx * qemu_real_host_page_size());
     }
     if (qidx % 2 == 1) {
@@ -601,7 +601,7 @@ static void *notifier_thread(void *arg)
 {
     VuDev *dev = (VuDev *)arg;
     VubrDev *vubr = container_of(dev, VubrDev, vudev);
-    int pagesize = qemu_real_host_page_size;
+    int pagesize = qemu_real_host_page_size();
     int qidx;
     while (true) {
@@ -637,7 +637,7 @@ vubr_host_notifier_setup(VubrDev *dev)
     void *addr;
     int fd;
-    length = qemu_real_host_page_size * VHOST_USER_BRIDGE_MAX_QUEUES;
+    length = qemu_real_host_page_size() * VHOST_USER_BRIDGE_MAX_QUEUES;
     fd = mkstemp(template);
     if (fd < 0) {


@@ -175,7 +175,7 @@ int qemu_fdatasync(int fd)
 int qemu_msync(void *addr, size_t length, int fd)
 {
 #ifdef CONFIG_POSIX
-    size_t align_mask = ~(qemu_real_host_page_size - 1);
+    size_t align_mask = ~(qemu_real_host_page_size() - 1);
     /**
      * There are no strict reqs as per the length of mapping
@@ -183,7 +183,7 @@ int qemu_msync(void *addr, size_t length, int fd)
      * alignment changes. Additionally - round the size to the multiple
      * of PAGE_SIZE
      */
-    length += ((uintptr_t)addr & (qemu_real_host_page_size - 1));
+    length += ((uintptr_t)addr & (qemu_real_host_page_size() - 1));
     length = (length + ~align_mask) & align_mask;
     addr = (void *)((uintptr_t)addr & align_mask);


@@ -42,7 +42,6 @@ if have_membarrier
   util_ss.add(files('sys_membarrier.c'))
 endif
 util_ss.add(files('log.c'))
-util_ss.add(files('pagesize.c'))
 util_ss.add(files('qdist.c'))
 util_ss.add(files('qht.c'))
 util_ss.add(files('qsp.c'))


@@ -50,7 +50,7 @@ size_t qemu_fd_getpagesize(int fd)
 #endif
 #endif
-    return qemu_real_host_page_size;
+    return qemu_real_host_page_size();
 }
 size_t qemu_mempath_getpagesize(const char *mem_path)
@@ -81,7 +81,7 @@ size_t qemu_mempath_getpagesize(const char *mem_path)
 #endif
 #endif
-    return qemu_real_host_page_size;
+    return qemu_real_host_page_size();
 }
 #define OVERCOMMIT_MEMORY_PATH "/proc/sys/vm/overcommit_memory"
@@ -101,7 +101,7 @@ static bool map_noreserve_effective(int fd, uint32_t qemu_map_flags)
      *    MAP_NORESERVE.
      * b) MAP_NORESERVE is not affected by /proc/sys/vm/overcommit_memory.
      */
-    if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size) {
+    if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size()) {
         return true;
     }
@@ -166,7 +166,7 @@ static void *mmap_reserve(size_t size, int fd)
      * We do this unless we are using the system page size, in which case
      * anonymous memory is OK.
      */
-    if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size) {
+    if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size()) {
         fd = -1;
         flags |= MAP_ANONYMOUS;
     } else {
@@ -243,7 +243,7 @@ static inline size_t mmap_guard_pagesize(int fd)
     /* Mappings in the same segment must share the same page size */
     return qemu_fd_getpagesize(fd);
 #else
-    return qemu_real_host_page_size;
+    return qemu_real_host_page_size();
 #endif
 }


@@ -69,8 +69,8 @@ int qemu_madvise(void *addr, size_t len, int advice)
 static int qemu_mprotect__osdep(void *addr, size_t size, int prot)
 {
-    g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask));
-    g_assert(!(size & ~qemu_real_host_page_mask));
+    g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask()));
+    g_assert(!(size & ~qemu_real_host_page_mask()));
 #ifdef _WIN32
     DWORD old_protect;


@@ -767,7 +767,7 @@ void *qemu_alloc_stack(size_t *sz)
 #ifdef CONFIG_DEBUG_STACK_USAGE
     void *ptr2;
 #endif
-    size_t pagesz = qemu_real_host_page_size;
+    size_t pagesz = qemu_real_host_page_size();
 #ifdef _SC_THREAD_STACK_MIN
     /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
     long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
@@ -829,7 +829,7 @@ void qemu_free_stack(void *stack, size_t sz)
     unsigned int usage;
     void *ptr;
-    for (ptr = stack + qemu_real_host_page_size; ptr < stack + sz;
+    for (ptr = stack + qemu_real_host_page_size(); ptr < stack + sz;
          ptr += sizeof(uint32_t)) {
         if (*(uint32_t *)ptr != 0xdeadbeaf) {
             break;
@@ -927,10 +927,10 @@ size_t qemu_get_host_physmem(void)
 #ifdef _SC_PHYS_PAGES
     long pages = sysconf(_SC_PHYS_PAGES);
     if (pages > 0) {
-        if (pages > SIZE_MAX / qemu_real_host_page_size) {
+        if (pages > SIZE_MAX / qemu_real_host_page_size()) {
             return SIZE_MAX;
         } else {
-            return pages * qemu_real_host_page_size;
+            return pages * qemu_real_host_page_size();
         }
     }
 #endif


@@ -319,7 +319,7 @@ void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
                      Error **errp)
 {
     int i;
-    size_t pagesize = qemu_real_host_page_size;
+    size_t pagesize = qemu_real_host_page_size();
     memory = (memory + pagesize - 1) & -pagesize;
     for (i = 0; i < memory / pagesize; i++) {


@@ -1,18 +0,0 @@
-/*
- * pagesize.c - query the host about its page size
- *
- * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-#include "qemu/osdep.h"
-uintptr_t qemu_real_host_page_size;
-intptr_t qemu_real_host_page_mask;
-static void __attribute__((constructor)) init_real_host_page_size(void)
-{
-    qemu_real_host_page_size = getpagesize();
-    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
-}


@@ -163,7 +163,7 @@ void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
                             Error **errp)
 {
     void *p;
-    assert(QEMU_IS_ALIGNED(offset, qemu_real_host_page_size));
+    assert(QEMU_IS_ALIGNED(offset, qemu_real_host_page_size()));
     assert_bar_index_valid(s, index);
     p = mmap(NULL, MIN(size, s->bar_region_info[index].size - offset),
              prot, MAP_SHARED,
@@ -591,9 +591,9 @@ static IOVAMapping *qemu_vfio_add_mapping(QEMUVFIOState *s,
     IOVAMapping m = {.host = host, .size = size, .iova = iova};
     IOVAMapping *insert;
-    assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size));
-    assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size));
-    assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size));
+    assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size()));
+    assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size()));
+    assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size()));
     trace_qemu_vfio_new_mapping(s, host, size, index, iova);
     assert(index >= 0);
@@ -644,7 +644,7 @@ static void qemu_vfio_undo_mapping(QEMUVFIOState *s, IOVAMapping *mapping,
     index = mapping - s->mappings;
     assert(mapping->size > 0);
-    assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size));
+    assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size()));
     assert(index >= 0 && index < s->nr_mappings);
     if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
         error_setg_errno(errp, errno, "VFIO_UNMAP_DMA failed");
@@ -752,8 +752,8 @@ int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
     IOVAMapping *mapping;
     uint64_t iova0;
-    assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size));
-    assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size));
+    assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size()));
+    assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size()));
     trace_qemu_vfio_dma_map(s, host, size, temporary, iova);
     QEMU_LOCK_GUARD(&s->lock);
     mapping = qemu_vfio_find_mapping(s, host, &index);