vfio/migration: Block multiple devices migration

Currently, VFIO migration does not implement an intermediate quiescent state
in which P2P DMAs are quiesced before stopping or running the device. This
can cause problems in multi-device migration where the devices are doing P2P
DMAs, since the devices are not all stopped at the same time.

Until such support is added, block migration of multiple devices.

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Link: https://lore.kernel.org/r/20230216143630.25610-6-avihaih@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Avihai Horon 2023-02-16 16:36:24 +02:00 committed by Alex Williamson
parent 8b942af393
commit 29d81b71aa
3 changed files with 61 additions and 0 deletions
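For context, the patch leans on QEMU's generic migration-blocker interface from
"migration/blocker.h". Below is a minimal sketch of that idiom, using a hypothetical
example_blocker object, example_block()/example_unblock() helpers and reason string,
and assuming the migrate_add_blocker()/migrate_del_blocker() signatures of this era of
the tree; it is an illustration of the pattern, not part of the patch:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/blocker.h"

/* Hypothetical blocker; the Error object doubles as the blocker handle. */
static Error *example_blocker;

/* Install the blocker; fails (ret < 0) if a migration is already in flight. */
static int example_block(Error **errp)
{
    int ret;

    error_setg(&example_blocker, "example: feature X does not support migration");
    ret = migrate_add_blocker(example_blocker, errp);
    if (ret < 0) {
        error_free(example_blocker);
        example_blocker = NULL;
    }

    return ret;
}

/* Remove and free the blocker once the condition no longer applies. */
static void example_unblock(void)
{
    if (example_blocker) {
        migrate_del_blocker(example_blocker);
        error_free(example_blocker);
        example_blocker = NULL;
    }
}

The new vfio_block_multiple_devices_migration() and
vfio_unblock_multiple_devices_migration() helpers added to hw/vfio/common.c below
follow this same shape, gated on how many migratable VFIO devices are currently present.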

hw/vfio/common.c

@@ -41,6 +41,7 @@
#include "qapi/error.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "migration/blocker.h"
#include "sysemu/tpm.h"

VFIOGroupList vfio_group_list =
@@ -337,6 +338,58 @@ bool vfio_mig_active(void)
    return true;
}

static Error *multiple_devices_migration_blocker;

static unsigned int vfio_migratable_device_num(void)
{
    VFIOGroup *group;
    VFIODevice *vbasedev;
    unsigned int device_num = 0;

    QLIST_FOREACH(group, &vfio_group_list, next) {
        QLIST_FOREACH(vbasedev, &group->device_list, next) {
            if (vbasedev->migration) {
                device_num++;
            }
        }
    }

    return device_num;
}

int vfio_block_multiple_devices_migration(Error **errp)
{
    int ret;

    if (multiple_devices_migration_blocker ||
        vfio_migratable_device_num() <= 1) {
        return 0;
    }

    error_setg(&multiple_devices_migration_blocker,
               "Migration is currently not supported with multiple "
               "VFIO devices");
    ret = migrate_add_blocker(multiple_devices_migration_blocker, errp);
    if (ret < 0) {
        error_free(multiple_devices_migration_blocker);
        multiple_devices_migration_blocker = NULL;
    }

    return ret;
}

void vfio_unblock_multiple_devices_migration(void)
{
    if (!multiple_devices_migration_blocker ||
        vfio_migratable_device_num() > 1) {
        return;
    }

    migrate_del_blocker(multiple_devices_migration_blocker);
    error_free(multiple_devices_migration_blocker);
    multiple_devices_migration_blocker = NULL;
}

static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
    VFIOGroup *group;

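Roughly, the intended call pattern for the two helpers above looks like the sketch
below; the device_added()/device_removed() names are hypothetical stand-ins, and the
real call sites are the vfio_migration_probe()/vfio_migration_finalize() changes in
hw/vfio/migration.c that follow:

/* Hypothetical registration path for a migratable VFIO device. */
static int device_added(Error **errp)
{
    /* No-op while at most one migratable device exists or a blocker is already set. */
    return vfio_block_multiple_devices_migration(errp);
}

/* Hypothetical teardown path, called after the device's migration state is freed. */
static void device_removed(void)
{
    /* Removes the blocker only once at most one migratable device remains. */
    vfio_unblock_multiple_devices_migration();
}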
hw/vfio/migration.c

@@ -878,6 +878,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
        goto add_blocker;
    }

    ret = vfio_block_multiple_devices_migration(errp);
    if (ret) {
        return ret;
    }

    trace_vfio_migration_probe(vbasedev->name, info->index);
    g_free(info);
    return 0;
@@ -904,6 +909,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
        qemu_del_vm_change_state_handler(migration->vm_state);
        unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
        vfio_migration_exit(vbasedev);
        vfio_unblock_multiple_devices_migration();
    }

    if (vbasedev->migration_blocker) {

include/hw/vfio/vfio-common.h

@@ -218,6 +218,8 @@ typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList;
extern VFIOGroupList vfio_group_list;

bool vfio_mig_active(void);
int vfio_block_multiple_devices_migration(Error **errp);
void vfio_unblock_multiple_devices_migration(void);
int64_t vfio_mig_bytes_transferred(void);

#ifdef CONFIG_LINUX