vdpa: add VhostVDPAShared

It will hold properties shared among all vhost_vdpa instances associated
with the same device.  For example, we just need one iova_tree or one
memory listener for the entire device.

Next patches will register the vhost_vdpa memory listener at the
beginning of the VM migration at the destination. This enables QEMU to
map the memory to the device before stopping the VM at the source,
instead of doing so while both source and destination are stopped, thus
minimizing the downtime.

However, the destination QEMU is unaware of which vhost_vdpa struct will
register its memory_listener.  If the source guest has CVQ enabled, it
will be the one associated with the CVQ.  Otherwise, it will be the
first one.

Save the memory-operation-related members in a common place rather than
always in the first / last vhost_vdpa.
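
As an illustration of what the shared struct buys here: once the memory
listener lives in the per-device state, whichever vhost_vdpa instance
ends up registering it (the CVQ's or the first one) operates on the same
mappings. A minimal sketch, assuming a later patch in the series has
moved the listener into VhostVDPAShared; the helper name and the
'listener' member are hypothetical, not part of this patch:

    /* Hypothetical helper: the caller may be the CVQ instance or the
     * first data-path instance; both reach the same per-device state. */
    static void vhost_vdpa_listener_register(struct vhost_vdpa *v)
    {
        memory_listener_register(&v->shared->listener,
                                 &address_space_memory);
    }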

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-2-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Commit: 8c5e980922 (parent: bc865bfe2d)
Author: Eugenio Pérez <eperezma@redhat.com>
Date: 2023-12-21 18:43:10 +01:00
Committer: Michael S. Tsirkin <mst@redhat.com>
2 changed files with 27 additions and 2 deletions

diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h

@@ -30,6 +30,10 @@ typedef struct VhostVDPAHostNotifier {
     void *addr;
 } VhostVDPAHostNotifier;
 
+/* Info shared by all vhost_vdpa device models */
+typedef struct vhost_vdpa_shared {
+} VhostVDPAShared;
+
 typedef struct vhost_vdpa {
     int device_fd;
     int index;
@@ -46,6 +50,7 @@ typedef struct vhost_vdpa {
     bool suspended;
     /* IOVA mapping used by the Shadow Virtqueue */
     VhostIOVATree *iova_tree;
+    VhostVDPAShared *shared;
     GPtrArray *shadow_vqs;
     const VhostShadowVirtqueueOps *shadow_vq_ops;
     void *shadow_vq_ops_opaque;
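
VhostVDPAShared starts out empty; it is a placeholder for the members
that later patches in the series move out of struct vhost_vdpa. A hedged
sketch of the shape it is expected to grow toward, based on the commit
message (member names are illustrative, not the final layout):

    /* Illustrative only -- in this patch the struct is still empty. */
    typedef struct vhost_vdpa_shared {
        /* one IOVA tree for the entire device */
        VhostIOVATree *iova_tree;
        /* one memory listener for the entire device */
        MemoryListener listener;
    } VhostVDPAShared;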

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c

@@ -240,6 +240,10 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
         qemu_close(s->vhost_vdpa.device_fd);
         s->vhost_vdpa.device_fd = -1;
     }
+    if (s->vhost_vdpa.index != 0) {
+        return;
+    }
+    g_free(s->vhost_vdpa.shared);
 }
 
 /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
@ -1661,6 +1665,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
bool svq,
struct vhost_vdpa_iova_range iova_range,
uint64_t features,
VhostVDPAShared *shared,
Error **errp)
{
NetClientState *nc = NULL;
@@ -1696,6 +1701,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     if (queue_pair_index == 0) {
         vhost_vdpa_net_valid_svq_features(features,
                                           &s->vhost_vdpa.migration_blocker);
+        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                                      PROT_READ | PROT_WRITE,
@@ -1708,11 +1714,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
         s->vhost_vdpa.shadow_vq_ops_opaque = s;
         s->cvq_isolated = cvq_isolated;
     }
+
+    if (queue_pair_index != 0) {
+        s->vhost_vdpa.shared = shared;
+    }
+
     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
     if (ret) {
         qemu_del_net_client(nc);
         return NULL;
     }
 
     return nc;
 }
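
Taken together, the cleanup and init hunks establish a simple ownership
rule, summarized below (a sketch of the call pattern, not code from the
patch):

    /*
     * VhostVDPAShared lifecycle across one device's NetClientStates:
     *
     *   queue pair 0 init:   shared = g_new0(VhostVDPAShared, 1);  owner
     *   queue pair N init:   shared taken from queue pair 0        borrower
     *   CVQ init:            shared taken from queue pair 0        borrower
     *   cleanup, index != 0: early return, nothing freed           borrower
     *   cleanup, index == 0: g_free(shared)                        owner
     */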
@@ -1824,17 +1835,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
 
     for (i = 0; i < queue_pairs; i++) {
+        VhostVDPAShared *shared = NULL;
+
+        if (i) {
+            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
+        }
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                      vdpa_device_fd, i, 2, true, opts->x_svq,
-                                     iova_range, features, errp);
+                                     iova_range, features, shared, errp);
         if (!ncs[i])
             goto err;
     }
 
     if (has_cvq) {
+        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
+        VhostVDPAShared *shared = s0->vhost_vdpa.shared;
+
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                  vdpa_device_fd, i, 1, false,
-                                 opts->x_svq, iova_range, features, errp);
+                                 opts->x_svq, iova_range, features, shared,
+                                 errp);
         if (!nc)
             goto err;
     }
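
A note on DO_UPCAST for readers who don't live in QEMU's tree: it
recovers the structure that embeds a given member, container_of-style.
Roughly, the lookup above behaves like this sketch:

    /* Approximately what DO_UPCAST(VhostVDPAState, nc, ncs[0]) expands
     * to: step back from the embedded 'nc' member to the start of the
     * enclosing VhostVDPAState. */
    VhostVDPAState *s0 = (VhostVDPAState *)
        ((char *)ncs[0] - offsetof(VhostVDPAState, nc));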