vdpa-dev: Fix initialisation order to restore VDUSE compatibility
VDUSE requires that virtqueues are first enabled before the DRIVER_OK status flag is set; with the current API of the kernel module, it is impossible to enable the opposite order in our block export code because userspace is not notified when a virtqueue is enabled.

This requirement also matches the normal initialisation order as done by the generic vhost code in QEMU. However, commit 6c482547 accidentally changed the order for vdpa-dev and broke access to VDUSE devices with this.

This changes vdpa-dev to use the normal order again and to use the standard vhost callback .vhost_set_vring_enable for this. VDUSE devices can be used with vdpa-dev again after this fix.

vhost_net intentionally avoided enabling the vrings for vdpa and does this manually later, while it does enable them for other vhost backends. Reflect this in the vhost_net code and return early for vdpa, so that the behaviour doesn't change for this device.

Cc: qemu-stable@nongnu.org
Fixes: 6c4825476a43 ('vdpa: move vhost_vdpa_set_vring_ready to the caller')
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20240315155949.86066-1-kwolf@redhat.com>
Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent: d9e4070603, commit: 2c66de61f8
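To make the ordering problem described in the message concrete, here is a minimal, self-contained sketch; it is not part of the patch, and all helper names are hypothetical stand-ins for the real backend calls. It shows the sequence VDUSE needs: every virtqueue is enabled before the DRIVER_OK status bit is written, since with the current kernel API userspace is not told about virtqueues that get enabled afterwards.

/* Illustrative sketch only; helper names are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_CONFIG_S_DRIVER_OK 4   /* status bit from the VIRTIO spec */

/* Hypothetical stand-ins for the real backend calls. */
static int backend_enable_vring(unsigned idx)
{
    printf("enable vring %u\n", idx);
    return 0;
}

static int backend_set_status(uint8_t status)
{
    printf("set status 0x%x\n", status);
    return 0;
}

static int start_device(unsigned nvqs)
{
    /* 1. Enable every virtqueue first... */
    for (unsigned i = 0; i < nvqs; i++) {
        int r = backend_enable_vring(i);
        if (r < 0) {
            return r;
        }
    }
    /* 2. ...and only then announce DRIVER_OK. */
    return backend_set_status(VIRTIO_CONFIG_S_DRIVER_OK);
}

int main(void)
{
    return start_device(2);
}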
@@ -541,6 +541,16 @@ int vhost_set_vring_enable(NetClientState *nc, int enable)
     VHostNetState *net = get_vhost_net(nc);
     const VhostOps *vhost_ops = net->dev.vhost_ops;
 
+    /*
+     * vhost-vdpa network devices need to enable dataplane virtqueues after
+     * DRIVER_OK, so they can recover device state before starting dataplane.
+     * Because of that, we don't enable virtqueues here and leave it to
+     * net/vhost-vdpa.c.
+     */
+    if (nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
+        return 0;
+    }
+
     nc->vring_enable = enable;
 
     if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
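The comment added in the hunk above (apparently in hw/net/vhost_net.c) explains why the vdpa case returns early: the network backend wants the opposite order. As a contrast to the previous sketch, the following is a purely illustrative model with hypothetical helpers, not the actual net/vhost-vdpa.c code that the hunk defers to: DRIVER_OK first, then state recovery, then the dataplane queues.

/* Hypothetical sketch; the real logic lives in net/vhost-vdpa.c. */
#include <stdio.h>

#define VIRTIO_CONFIG_S_DRIVER_OK 4

static int set_status(int status)     { printf("status 0x%x\n", status); return 0; }
static int restore_device_state(void) { printf("restore state via CVQ\n"); return 0; }
static int enable_vring(unsigned idx) { printf("enable vring %u\n", idx); return 0; }

int main(void)
{
    int r = set_status(VIRTIO_CONFIG_S_DRIVER_OK);  /* 1. DRIVER_OK first      */
    if (r < 0) {
        return 1;
    }
    restore_device_state();                         /* 2. recover device state */
    for (unsigned i = 0; i < 2; i++) {              /* 3. enable dataplane vqs */
        enable_vring(i);
    }
    return 0;
}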
@@ -49,7 +49,7 @@ vhost_vdpa_set_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRI
 vhost_vdpa_get_device_id(void *dev, uint32_t device_id) "dev: %p device_id %"PRIu32
 vhost_vdpa_reset_device(void *dev) "dev: %p"
 vhost_vdpa_get_vq_index(void *dev, int idx, int vq_idx) "dev: %p idx: %d vq idx: %d"
-vhost_vdpa_set_vring_ready(void *dev, unsigned i, int r) "dev: %p, idx: %u, r: %d"
+vhost_vdpa_set_vring_enable_one(void *dev, unsigned i, int enable, int r) "dev: %p, idx: %u, enable: %u, r: %d"
 vhost_vdpa_dump_config(void *dev, const char *line) "dev: %p %s"
 vhost_vdpa_set_config(void *dev, uint32_t offset, uint32_t size, uint32_t flags) "dev: %p offset: %"PRIu32" size: %"PRIu32" flags: 0x%"PRIx32
 vhost_vdpa_get_config(void *dev, void *config, uint32_t config_len) "dev: %p config: %p config_len: %"PRIu32
@@ -253,14 +253,11 @@ static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp)
 
     s->dev.acked_features = vdev->guest_features;
 
-    ret = vhost_dev_start(&s->dev, vdev, false);
+    ret = vhost_dev_start(&s->dev, vdev, true);
     if (ret < 0) {
         error_setg_errno(errp, -ret, "Error starting vhost");
         goto err_guest_notifiers;
     }
-    for (i = 0; i < s->dev.nvqs; ++i) {
-        vhost_vdpa_set_vring_ready(&s->vdpa, i);
-    }
     s->started = true;
 
     /*
@@ -896,19 +896,41 @@ static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
     return idx;
 }
 
-int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
+static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx,
+                                           int enable)
 {
     struct vhost_dev *dev = v->dev;
     struct vhost_vring_state state = {
         .index = idx,
-        .num = 1,
+        .num = enable,
     };
     int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
 
-    trace_vhost_vdpa_set_vring_ready(dev, idx, r);
+    trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r);
     return r;
 }
 
+static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    unsigned int i;
+    int ret;
+
+    for (i = 0; i < dev->nvqs; ++i) {
+        ret = vhost_vdpa_set_vring_enable_one(v, i, enable);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
+{
+    return vhost_vdpa_set_vring_enable_one(v, idx, 1);
+}
+
 static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
                                        int fd)
 {
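The renamed helper above ultimately issues the VHOST_VDPA_SET_VRING_ENABLE ioctl with the enable flag carried in vhost_vring_state.num. As a standalone illustration of that kernel interface (the device path is only an example, error handling is trimmed, and building it needs Linux UAPI headers recent enough to define VHOST_VDPA_SET_VRING_ENABLE), a minimal user-space program might look like this:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
    int fd = open("/dev/vhost-vdpa-0", O_RDWR);   /* example device node */
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct vhost_vring_state state = {
        .index = 0,   /* virtqueue index */
        .num = 1,     /* 1 = enable, 0 = disable */
    };

    if (ioctl(fd, VHOST_VDPA_SET_VRING_ENABLE, &state) < 0) {
        perror("VHOST_VDPA_SET_VRING_ENABLE");
    }

    close(fd);
    return 0;
}

Keeping vhost_vdpa_set_vring_ready() as a thin wrapper around the new per-queue helper lets existing callers (for example in net/vhost-vdpa.c, per the comment added earlier) stay unchanged, while the new .vhost_set_vring_enable callback loops over all queues of the device.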
@@ -1536,6 +1558,7 @@ const VhostOps vdpa_ops = {
         .vhost_set_features = vhost_vdpa_set_features,
         .vhost_reset_device = vhost_vdpa_reset_device,
         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
+        .vhost_set_vring_enable = vhost_vdpa_set_vring_enable,
         .vhost_get_config = vhost_vdpa_get_config,
         .vhost_set_config = vhost_vdpa_set_config,
         .vhost_requires_shm_log = NULL,
@@ -1984,7 +1984,13 @@ static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
     return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
 }
 
-/* Host notifiers must be enabled at this point. */
+/*
+ * Host notifiers must be enabled at this point.
+ *
+ * If @vrings is true, this function will enable all vrings before starting the
+ * device. If it is false, the vring initialization is left to be done by the
+ * caller.
+ */
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
 {
     int i, r;