vhost: Change the sequence of device start
This patch is part of adding vhost-user vhost_dev_start support. The
motivation is to improve backend configuration speed and reduce live
migration VM downtime.

Move the device start routines to after all the necessary device and
VQ configuration has finished, further aligning with the virtio
specification's "device initialization sequence". A following patch
will add vhost-user vhost_dev_start support.

Signed-off-by: Yajun Wu <yajunw@nvidia.com>
Acked-by: Parav Pandit <parav@nvidia.com>
Message-Id: <20221017064452.1226514-2-yajunw@nvidia.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 8b67fe0065
parent 1b2b12376c
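For orientation, the following is an illustrative standalone sketch (not QEMU code) of the virtio "device initialization sequence" the commit message refers to: the driver negotiates features and configures every virtqueue first, and only then sets DRIVER_OK to start the device. The status bit values follow the virtio 1.x specification; everything else here is a hypothetical stand-in.

/* build: cc -o virtio_init_sketch virtio_init_sketch.c */
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_STATUS_ACKNOWLEDGE  1u
#define VIRTIO_STATUS_DRIVER       2u
#define VIRTIO_STATUS_DRIVER_OK    4u
#define VIRTIO_STATUS_FEATURES_OK  8u

int main(void)
{
    uint8_t status = 0;

    status |= VIRTIO_STATUS_ACKNOWLEDGE;   /* guest noticed the device      */
    status |= VIRTIO_STATUS_DRIVER;        /* guest knows how to drive it   */
    /* ... feature negotiation happens here ... */
    status |= VIRTIO_STATUS_FEATURES_OK;   /* features accepted             */
    /* ... all virtqueue configuration happens here ... */
    status |= VIRTIO_STATUS_DRIVER_OK;     /* start the device LAST         */

    printf("final status: 0x%02x\n", status);
    return 0;
}

The patch below applies the same principle on the backend side: vhost_dev_start() is the last step, after every queue is configured.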
--- a/hw/block/vhost-user-blk.c
+++ b/hw/block/vhost-user-blk.c
@@ -168,13 +168,6 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
         goto err_guest_notifiers;
     }
 
-    ret = vhost_dev_start(&s->dev, vdev);
-    if (ret < 0) {
-        error_setg_errno(errp, -ret, "Error starting vhost");
-        goto err_guest_notifiers;
-    }
-    s->started_vu = true;
-
     /* guest_notifier_mask/pending not used yet, so just unmask
      * everything here. virtio-pci will do the right thing by
      * enabling/disabling irqfd.
@@ -183,9 +176,20 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
         vhost_virtqueue_mask(&s->dev, vdev, i, false);
     }
 
+    s->dev.vq_index_end = s->dev.nvqs;
+    ret = vhost_dev_start(&s->dev, vdev);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret, "Error starting vhost");
+        goto err_guest_notifiers;
+    }
+    s->started_vu = true;
+
     return ret;
 
 err_guest_notifiers:
+    for (i = 0; i < s->dev.nvqs; i++) {
+        vhost_virtqueue_mask(&s->dev, vdev, i, true);
+    }
     k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
 err_host_notifiers:
     vhost_dev_disable_notifiers(&s->dev, vdev);
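To see the resulting control flow in isolation, here is a minimal standalone sketch of the ordering these hunks establish in vhost_user_blk_start(): unmask/configure every virtqueue first, start the device last, and re-mask on failure. All names here are hypothetical stand-ins, not QEMU APIs.

/* build: cc -o blk_start_sketch blk_start_sketch.c */
#include <stdbool.h>
#include <stdio.h>

#define NVQS 4

static bool vq_masked[NVQS] = { true, true, true, true };

static void vq_mask(int i, bool mask)
{
    vq_masked[i] = mask;
}

static int dev_start(void)
{
    return 0; /* pretend the backend accepted the start request */
}

static int blk_start(void)
{
    int i, ret;

    /* 1. finish all per-VQ configuration before starting */
    for (i = 0; i < NVQS; i++) {
        vq_mask(i, false);
    }

    /* 2. start the device only after configuration is complete */
    ret = dev_start();
    if (ret < 0) {
        goto err_unmask;
    }
    return 0;

err_unmask:
    /* undo the unmasking done above, mirroring the new
     * err_guest_notifiers loop in the hunk */
    for (i = 0; i < NVQS; i++) {
        vq_mask(i, true);
    }
    return ret;
}

int main(void)
{
    printf("blk_start() = %d\n", blk_start());
    return 0;
}

Because the VQs are now unmasked before vhost_dev_start() runs, the error path gains the re-mask loop shown in the err_guest_notifiers hunk above.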
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -389,21 +389,20 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         } else {
             peer = qemu_get_peer(ncs, n->max_queue_pairs);
         }
-        r = vhost_net_start_one(get_vhost_net(peer), dev);
-
-        if (r < 0) {
-            goto err_start;
-        }
 
         if (peer->vring_enable) {
             /* restore vring enable state */
             r = vhost_set_vring_enable(peer, peer->vring_enable);
 
             if (r < 0) {
-                vhost_net_stop_one(get_vhost_net(peer), dev);
                 goto err_start;
             }
         }
+
+        r = vhost_net_start_one(get_vhost_net(peer), dev);
+        if (r < 0) {
+            goto err_start;
+        }
     }
 
     return 0;
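A minimal standalone sketch of the new control flow in vhost_net_start(): the vring enable state is restored before the backend is started, so the failure path no longer needs a vhost_net_stop_one() style rollback. All names are hypothetical stand-ins, not QEMU APIs.

/* build: cc -o net_start_sketch net_start_sketch.c */
#include <stdio.h>

static int set_vring_enable(int enable)
{
    (void)enable;
    return 0; /* pretend the backend accepted it */
}

static int start_one(void)
{
    return 0; /* pretend the device started */
}

static int net_start_peer(int vring_enable)
{
    int r;

    if (vring_enable) {
        /* restore vring enable state first */
        r = set_vring_enable(vring_enable);
        if (r < 0) {
            return r; /* nothing started yet, so nothing to stop */
        }
    }

    /* start the device last, after all configuration */
    return start_one();
}

int main(void)
{
    printf("net_start_peer(1) = %d\n", net_start_peer(1));
    return 0;
}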