vfio/container: Switch to IOMMU BE set_dirty_page_tracking/query_dirty_bitmap API

The dirty_pages_supported field is also moved to the base container.

No functional change intended.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Authored by Eric Auger on 2023-11-02 15:12:33 +08:00, committed by Cédric Le Goater
parent e559706338
commit bb424490ed
5 changed files with 44 additions and 17 deletions
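
The new vfio_container_set_dirty_page_tracking() and vfio_container_query_dirty_bitmap() entry points simply dispatch through the VFIOIOMMUOps callbacks registered by each IOMMU backend, as the hunks below show. As a rough illustration of what this buys, a hypothetical second backend could plug into the same interface roughly like this (the my_backend_* names are illustrative only and not part of this commit; the callback signatures follow the ones added below):

/* Illustrative only: a hypothetical backend wiring its own callbacks into
 * the VFIOIOMMUOps interface used by this commit. Only the two
 * dirty-tracking callbacks are sketched here. */
static int my_backend_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
                                              bool start)
{
    /* Enable or disable dirty-page tracking in a backend-specific way. */
    return 0;
}

static int my_backend_query_dirty_bitmap(VFIOContainerBase *bcontainer,
                                         VFIOBitmap *vbmap,
                                         hwaddr iova, hwaddr size)
{
    /* Fill vbmap->bitmap for the [iova, iova + size) range. */
    return 0;
}

static const VFIOIOMMUOps my_backend_ops = {
    .set_dirty_page_tracking = my_backend_set_dirty_page_tracking,
    .query_dirty_bitmap = my_backend_query_dirty_bitmap,
};

Such an ops table would presumably be handed to vfio_container_init(), which stores the ops pointer on the base container, just as the legacy backend exposes vfio_legacy_ops below.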


@@ -1079,7 +1079,8 @@ static void vfio_listener_log_global_start(MemoryListener *listener)
     if (vfio_devices_all_device_dirty_tracking(container)) {
         ret = vfio_devices_dma_logging_start(container);
     } else {
-        ret = vfio_set_dirty_page_tracking(container, true);
+        ret = vfio_container_set_dirty_page_tracking(&container->bcontainer,
+                                                     true);
     }

     if (ret) {
@@ -1097,7 +1098,8 @@ static void vfio_listener_log_global_stop(MemoryListener *listener)
     if (vfio_devices_all_device_dirty_tracking(container)) {
         vfio_devices_dma_logging_stop(container);
     } else {
-        ret = vfio_set_dirty_page_tracking(container, false);
+        ret = vfio_container_set_dirty_page_tracking(&container->bcontainer,
+                                                     false);
     }

     if (ret) {
@@ -1165,7 +1167,8 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
     VFIOBitmap vbmap;
     int ret;

-    if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
+    if (!container->bcontainer.dirty_pages_supported &&
+        !all_device_dirty_tracking) {
         cpu_physical_memory_set_dirty_range(ram_addr, size,
                                             tcg_enabled() ? DIRTY_CLIENTS_ALL :
                                             DIRTY_CLIENTS_NOCODE);
@@ -1180,7 +1183,8 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
     if (all_device_dirty_tracking) {
         ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
     } else {
-        ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+        ret = vfio_container_query_dirty_bitmap(&container->bcontainer, &vbmap,
                                                iova, size);
     }

     if (ret) {


@@ -31,11 +31,27 @@ int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
     return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
 }

+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+                                           bool start)
+{
+    g_assert(bcontainer->ops->set_dirty_page_tracking);
+    return bcontainer->ops->set_dirty_page_tracking(bcontainer, start);
+}
+
+int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                      VFIOBitmap *vbmap,
+                                      hwaddr iova, hwaddr size)
+{
+    g_assert(bcontainer->ops->query_dirty_bitmap);
+    return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size);
+}
+
 void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
                          const VFIOIOMMUOps *ops)
 {
     bcontainer->ops = ops;
     bcontainer->space = space;
+    bcontainer->dirty_pages_supported = false;
     QLIST_INIT(&bcontainer->giommu_list);
 }


@@ -131,7 +131,7 @@ static int vfio_legacy_dma_unmap(VFIOContainerBase *bcontainer, hwaddr iova,

     if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
         if (!vfio_devices_all_device_dirty_tracking(container) &&
-            container->dirty_pages_supported) {
+            container->bcontainer.dirty_pages_supported) {
             return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
         }

@@ -205,14 +205,17 @@ static int vfio_legacy_dma_map(VFIOContainerBase *bcontainer, hwaddr iova,
     return -errno;
 }

-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+static int vfio_legacy_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+                                               bool start)
 {
+    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                            bcontainer);
     int ret;
     struct vfio_iommu_type1_dirty_bitmap dirty = {
         .argsz = sizeof(dirty),
     };

-    if (!container->dirty_pages_supported) {
+    if (!bcontainer->dirty_pages_supported) {
         return 0;
     }

@@ -232,9 +235,12 @@ int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
     return ret;
 }

-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
-                            hwaddr iova, hwaddr size)
+static int vfio_legacy_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                          VFIOBitmap *vbmap,
+                                          hwaddr iova, hwaddr size)
 {
+    VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                            bcontainer);
     struct vfio_iommu_type1_dirty_bitmap *dbitmap;
     struct vfio_iommu_type1_dirty_bitmap_get *range;
     int ret;
@@ -461,7 +467,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container,
      * qemu_real_host_page_size to mark those dirty.
      */
     if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
-        container->dirty_pages_supported = true;
+        container->bcontainer.dirty_pages_supported = true;
         container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
         container->dirty_pgsizes = cap_mig->pgsize_bitmap;
     }
@@ -553,7 +559,6 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     container = g_malloc0(sizeof(*container));
     container->fd = fd;
     container->error = NULL;
-    container->dirty_pages_supported = false;
     container->dma_max_mappings = 0;
     container->iova_ranges = NULL;
     QLIST_INIT(&container->vrdl_list);
@@ -937,4 +942,6 @@ void vfio_detach_device(VFIODevice *vbasedev)
 const VFIOIOMMUOps vfio_legacy_ops = {
     .dma_map = vfio_legacy_dma_map,
     .dma_unmap = vfio_legacy_dma_unmap,
+    .set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
+    .query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
 };


@@ -83,7 +83,6 @@ typedef struct VFIOContainer {
     unsigned iommu_type;
     Error *error;
     bool initialized;
-    bool dirty_pages_supported;
     uint64_t dirty_pgsizes;
     uint64_t max_dirty_bitmap_size;
     unsigned long pgsizes;
@@ -190,11 +189,6 @@ VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
 void vfio_put_address_space(VFIOAddressSpace *space);
 bool vfio_devices_all_running_and_saving(VFIOContainer *container);

-/* container->fd */
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start);
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
-                            hwaddr iova, hwaddr size);
-
 /* SPAPR specific */
 int vfio_container_add_section_window(VFIOContainer *container,
                                       MemoryRegionSection *section,


@@ -36,6 +36,7 @@ typedef struct VFIOAddressSpace {
 typedef struct VFIOContainerBase {
     const VFIOIOMMUOps *ops;
     VFIOAddressSpace *space;
+    bool dirty_pages_supported;
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
     QLIST_ENTRY(VFIOContainerBase) next;
 } VFIOContainerBase;
@@ -54,6 +55,11 @@ int vfio_container_dma_map(VFIOContainerBase *bcontainer,
 int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
                              hwaddr iova, ram_addr_t size,
                              IOMMUTLBEntry *iotlb);
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+                                           bool start);
+int vfio_container_query_dirty_bitmap(VFIOContainerBase *bcontainer,
+                                      VFIOBitmap *vbmap,
+                                      hwaddr iova, hwaddr size);
 void vfio_container_init(VFIOContainerBase *bcontainer,
                          VFIOAddressSpace *space,
                          const VFIOIOMMUOps *ops);
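
For completeness, a caller-side sketch (a hypothetical helper, not part of this commit) of how generic code can now drive dirty tracking purely through the base-container entry points declared above, without knowing which backend is behind them:

/* Hypothetical helper, not part of this commit: start tracking, read back
 * one bitmap window, then stop, using only the new generic entry points. */
static int vfio_sync_dirty_window(VFIOContainerBase *bcontainer,
                                  VFIOBitmap *vbmap, hwaddr iova, hwaddr size)
{
    int ret = vfio_container_set_dirty_page_tracking(bcontainer, true);

    if (ret) {
        return ret;
    }
    ret = vfio_container_query_dirty_bitmap(bcontainer, vbmap, iova, size);
    /* Stop tracking; its return value is ignored here for brevity. */
    vfio_container_set_dirty_page_tracking(bcontainer, false);
    return ret;
}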