vfio: Support host translation granule size

The cpu_physical_memory_set_dirty_lebitmap() can quickly handle
the dirty pages of memory by bitmap traversal, regardless of whether
the bitmap is aligned correctly or not.

cpu_physical_memory_set_dirty_lebitmap() supports pages in a bitmap of
host page size. So it is better to set bitmap_pgsize to the host page
size in order to support more translation granule sizes.

[aw: The Fixes commit below introduced code to restrict migration
support to configurations where the target page size intersects the
host dirty page support.  For example, a 4K guest on a 4K host.
Due to the above flexibility in bitmap handling, this restriction
unnecessarily prevents mixed target/host pages size that could
otherwise be supported.  Use host page size for dirty bitmap.]

Fixes: 87ea529c50 ("vfio: Get migration capability flags for container")
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Message-Id: <20210304133446.1521-1-jiangkunkun@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
This commit is contained in:
Kunkun Jiang 2021-03-04 21:34:46 +08:00 committed by Alex Williamson
parent ecebe53fe9
commit 1eb7f64275
1 changed file with 25 additions and 23 deletions

View File

@ -378,7 +378,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
{ {
struct vfio_iommu_type1_dma_unmap *unmap; struct vfio_iommu_type1_dma_unmap *unmap;
struct vfio_bitmap *bitmap; struct vfio_bitmap *bitmap;
uint64_t pages = TARGET_PAGE_ALIGN(size) >> TARGET_PAGE_BITS; uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
int ret; int ret;
unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
@ -390,12 +390,12 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container,
bitmap = (struct vfio_bitmap *)&unmap->data; bitmap = (struct vfio_bitmap *)&unmap->data;
/* /*
* cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap_pgsize to * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
* TARGET_PAGE_SIZE. * to qemu_real_host_page_size.
*/ */
bitmap->pgsize = TARGET_PAGE_SIZE; bitmap->pgsize = qemu_real_host_page_size;
bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
BITS_PER_BYTE; BITS_PER_BYTE;
@ -674,16 +674,17 @@ static void vfio_listener_region_add(MemoryListener *listener,
return; return;
} }
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != if (unlikely((section->offset_within_address_space &
(section->offset_within_region & ~TARGET_PAGE_MASK))) { ~qemu_real_host_page_mask) !=
(section->offset_within_region & ~qemu_real_host_page_mask))) {
error_report("%s received unaligned region", __func__); error_report("%s received unaligned region", __func__);
return; return;
} }
iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space); llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size); llend = int128_add(llend, section->size);
llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
if (int128_ge(int128_make64(iova), llend)) { if (int128_ge(int128_make64(iova), llend)) {
return; return;
@ -892,8 +893,9 @@ static void vfio_listener_region_del(MemoryListener *listener,
return; return;
} }
if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) != if (unlikely((section->offset_within_address_space &
(section->offset_within_region & ~TARGET_PAGE_MASK))) { ~qemu_real_host_page_mask) !=
(section->offset_within_region & ~qemu_real_host_page_mask))) {
error_report("%s received unaligned region", __func__); error_report("%s received unaligned region", __func__);
return; return;
} }
@ -921,10 +923,10 @@ static void vfio_listener_region_del(MemoryListener *listener,
*/ */
} }
iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space); llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size); llend = int128_add(llend, section->size);
llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
if (int128_ge(int128_make64(iova), llend)) { if (int128_ge(int128_make64(iova), llend)) {
return; return;
@ -1004,13 +1006,13 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
range->size = size; range->size = size;
/* /*
* cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* TARGET_PAGE_SIZE to mark those dirty. Hence set bitmap's pgsize to * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
* TARGET_PAGE_SIZE. * to qemu_real_host_page_size.
*/ */
range->bitmap.pgsize = TARGET_PAGE_SIZE; range->bitmap.pgsize = qemu_real_host_page_size;
pages = TARGET_PAGE_ALIGN(range->size) >> TARGET_PAGE_BITS; pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
BITS_PER_BYTE; BITS_PER_BYTE;
range->bitmap.data = g_try_malloc0(range->bitmap.size); range->bitmap.data = g_try_malloc0(range->bitmap.size);
@ -1114,8 +1116,8 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
section->offset_within_region; section->offset_within_region;
return vfio_get_dirty_bitmap(container, return vfio_get_dirty_bitmap(container,
TARGET_PAGE_ALIGN(section->offset_within_address_space), REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
int128_get64(section->size), ram_addr); int128_get64(section->size), ram_addr);
} }
static void vfio_listener_log_sync(MemoryListener *listener, static void vfio_listener_log_sync(MemoryListener *listener,
@ -1655,10 +1657,10 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
header); header);
/* /*
* cpu_physical_memory_set_dirty_lebitmap() expects pages in bitmap of * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* TARGET_PAGE_SIZE to mark those dirty. * qemu_real_host_page_size to mark those dirty.
*/ */
if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) { if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
container->dirty_pages_supported = true; container->dirty_pages_supported = true;
container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
container->dirty_pgsizes = cap_mig->pgsize_bitmap; container->dirty_pgsizes = cap_mig->pgsize_bitmap;