vdpa: always start CVQ in SVQ mode if possible

Isolate the control virtqueue in its own virtqueue group, allowing QEMU to
intercept control commands while the data plane keeps running fully
passthrough to the guest.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20221215113144.322011-13-eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Eugenio Pérez, 2022-12-15 12:31:44 +01:00 (committed by Michael S. Tsirkin)
commit c1a1008685, parent 6188d78a19
2 changed files with 111 additions and 2 deletions
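
In practice the isolation boils down to two vhost-vdpa ioctls. Below is a
minimal userspace sketch of the end state the patch arranges: the CVQ's
group pinned to its own address space while the data vqs stay in ASID 0
with the guest physical address mappings. It is not part of the commit;
device_fd and cvq_index are assumed given, and isolate_cvq is a
hypothetical helper.

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int isolate_cvq(int device_fd, unsigned cvq_index)
{
    struct vhost_vring_state state = { .index = cvq_index };

    /* Ask which vq group the CVQ belongs to; the group id comes back
     * in state.num. */
    if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state) < 0) {
        return -1;
    }

    /* Pin that whole group to address space 1, leaving the data vqs'
     * group in ASID 0 with the guest physical address mappings. */
    state.index = state.num;    /* now a group id, not a vq index */
    state.num = 1;              /* target ASID */
    return ioctl(device_fd, VHOST_VDPA_SET_GROUP_ASID, &state);
}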

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c

@@ -638,7 +638,8 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
 {
     uint64_t features;
     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
-        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
+        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
+        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
     int r;
 
     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
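
One subtlety in this hunk: the VHOST_BACKEND_F_* constants from the Linux
uapi are bit indexes (VHOST_BACKEND_F_IOTLB_ASID is 0x3), not masks, hence
the 0x1ULL << shifts. The same shift is needed when testing a returned
feature word. A small sketch of both directions; wanted_backend_caps and
has_iotlb_asid are hypothetical names, not QEMU code.

#include <stdint.h>
#include <stdbool.h>
#include <linux/vhost_types.h>

/* Advertise: turn each bit index into a mask and OR them together. */
static uint64_t wanted_backend_caps(void)
{
    return 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
           0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
           0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
}

/* Test: the shift is just as necessary here; "features &
 * VHOST_BACKEND_F_IOTLB_ASID" would test bits 0 and 1 instead of bit 3. */
static bool has_iotlb_asid(uint64_t features)
{
    return features & (0x1ULL << VHOST_BACKEND_F_IOTLB_ASID);
}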

diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c

@@ -102,6 +102,8 @@ static const uint64_t vdpa_svq_device_features =
     BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
     BIT_ULL(VIRTIO_NET_F_STANDBY);
 
+#define VHOST_VDPA_NET_CVQ_ASID 1
+
 VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -243,6 +245,40 @@ static NetClientInfo net_vhost_vdpa_info = {
         .check_peer_type = vhost_vdpa_check_peer_type,
 };
 
+static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
+{
+    struct vhost_vring_state state = {
+        .index = vq_index,
+    };
+    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
+
+    if (unlikely(r < 0)) {
+        error_report("Cannot get VQ %u group: %s", vq_index,
+                     g_strerror(errno));
+        return r;
+    }
+
+    return state.num;
+}
+
+static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
+                                           unsigned vq_group,
+                                           unsigned asid_num)
+{
+    struct vhost_vring_state asid = {
+        .index = vq_group,
+        .num = asid_num,
+    };
+    int r;
+
+    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
+    if (unlikely(r < 0)) {
+        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
+                     asid.index, asid.num, errno, g_strerror(errno));
+    }
+    return r;
+}
+
 static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
 {
     VhostIOVATree *tree = v->iova_tree;
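
Note that both new helpers reuse struct vhost_vring_state with its fields
repurposed: VHOST_VDPA_GET_VRING_GROUP takes a vq index in .index and
returns the group id in .num, while VHOST_VDPA_SET_GROUP_ASID takes a
group id in .index and the target ASID in .num. A standalone sketch of
the second layout; move_group_to_asid is a hypothetical name.

#include <sys/ioctl.h>
#include <linux/vhost.h>

static int move_group_to_asid(int device_fd, unsigned group, unsigned asid)
{
    /* .index carries the group id here, not a virtqueue index. */
    struct vhost_vring_state s = { .index = group, .num = asid };

    return ioctl(device_fd, VHOST_VDPA_SET_GROUP_ASID, &s);
}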
@@ -317,11 +353,75 @@ dma_map_err:
 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
     VhostVDPAState *s;
-    int r;
+    struct vhost_vdpa *v;
+    uint64_t backend_features;
+    int64_t cvq_group;
+    int cvq_index, r;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
     s = DO_UPCAST(VhostVDPAState, nc, nc);
+    v = &s->vhost_vdpa;
+
+    v->shadow_data = s->always_svq;
+    v->shadow_vqs_enabled = s->always_svq;
+    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
+
+    if (s->always_svq) {
+        /* SVQ is already configured for all virtqueues */
+        goto out;
+    }
+
+    /*
+     * If we return early here, SVQ will not be enabled, and migration will
+     * stay blocked for as long as the vhost-vdpa backend does not offer
+     * _F_LOG.
+     *
+     * Call VHOST_GET_BACKEND_FEATURES directly, as the backend features are
+     * not available in v->dev yet.
+     */
+    r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
+    if (unlikely(r < 0)) {
+        error_report("Cannot get vdpa backend_features: %s(%d)",
+                     g_strerror(errno), errno);
+        return -1;
+    }
+    if (!(backend_features & VHOST_BACKEND_F_IOTLB_ASID) ||
+        !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
+        return 0;
+    }
+
+    /*
+     * Check that every data virtqueue of the device is in a different vq
+     * group than the last vq, the CVQ. The CVQ's group is kept in
+     * cvq_group.
+     */
+    cvq_index = v->dev->vq_index_end - 1;
+    cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
+    if (unlikely(cvq_group < 0)) {
+        return cvq_group;
+    }
+
+    for (int i = 0; i < cvq_index; ++i) {
+        int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);
+
+        if (unlikely(group < 0)) {
+            return group;
+        }
+
+        if (group == cvq_group) {
+            return 0;
+        }
+    }
+
+    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
+    if (unlikely(r < 0)) {
+        return r;
+    }
+
+    v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+                                       v->iova_range.last);
+    v->shadow_vqs_enabled = true;
+    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
+
+out:
     if (!s->vhost_vdpa.shadow_vqs_enabled) {
         return 0;
     }
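
The isolation loop above, walked through with an assumed topology: a
virtio-net device with two data queue pairs plus the CVQ exposes five
virtqueues, so v->dev->vq_index_end is 5 and the CVQ is index 4. A
pure-function version of the check; cvq_can_shadow and the example group
arrays are illustrations, not QEMU code.

#include <stdint.h>
#include <stdbool.h>

static bool cvq_can_shadow(const int64_t groups[], unsigned vq_index_end)
{
    unsigned cvq_index = vq_index_end - 1;
    int64_t cvq_group = groups[cvq_index];

    for (unsigned i = 0; i < cvq_index; i++) {
        if (groups[i] == cvq_group) {
            return false;   /* a data vq shares the CVQ's group */
        }
    }
    return true;            /* CVQ isolated: safe to switch its ASID */
}

/*
 * groups = {0, 0, 0, 0, 1} -> true: only the CVQ is shadowed and moved
 * to VHOST_VDPA_NET_CVQ_ASID.
 * groups = {0, 0, 0, 1, 1} -> false: the start function returns 0 and
 * the whole device stays passthrough.
 */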
@@ -350,6 +450,14 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     if (s->vhost_vdpa.shadow_vqs_enabled) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
+        if (!s->always_svq) {
+            /*
+             * If only the CVQ is shadowed we can delete this safely.
+             * If all the VQs are shadowed, it will be needed again by
+             * the time the device is started, to register the SVQ
+             * vrings and similar.
+             */
+            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+        }
     }
 }
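
The g_clear_pointer() call in the stop path is the GLib idiom that both
destroys the tree and NULLs the field, so the next
vhost_vdpa_net_cvq_start() sees iova_tree == NULL and allocates a fresh
one. A minimal illustration of the idiom with placeholder types; FakeTree
and fake_tree_free are not QEMU code.

#include <glib.h>

typedef struct { int placeholder; } FakeTree;

static void fake_tree_free(FakeTree *t)
{
    g_free(t);
}

int main(void)
{
    FakeTree *tree = g_new0(FakeTree, 1);

    /* Frees the object *and* resets the pointer to NULL. */
    g_clear_pointer(&tree, fake_tree_free);
    g_assert(tree == NULL);

    /* A second call is a harmless no-op because tree is already NULL,
     * which is what makes the stop path safe to run repeatedly. */
    g_clear_pointer(&tree, fake_tree_free);
    return 0;
}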