vhost: move iova_tree set to vhost_svq_start
Since we don't know if we will use SVQ at qemu initialization, let's allocate iova_tree only if needed. To do so, accept it at SVQ start, not at initialization. This will avoid creating it if the device does not support SVQ. Signed-off-by: Eugenio Pérez <eperezma@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Message-Id: <20221215113144.322011-5-eperezma@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
3cfb4d069c
commit
5fde952bbd
@ -642,9 +642,10 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
|
||||
* @svq: Shadow Virtqueue
|
||||
* @vdev: VirtIO device
|
||||
* @vq: Virtqueue to shadow
|
||||
* @iova_tree: Tree to perform descriptors translations
|
||||
*/
|
||||
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
||||
VirtQueue *vq)
|
||||
VirtQueue *vq, VhostIOVATree *iova_tree)
|
||||
{
|
||||
size_t desc_size, driver_size, device_size;
|
||||
|
||||
@ -655,6 +656,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
||||
svq->last_used_idx = 0;
|
||||
svq->vdev = vdev;
|
||||
svq->vq = vq;
|
||||
svq->iova_tree = iova_tree;
|
||||
|
||||
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
|
||||
driver_size = vhost_svq_driver_area_size(svq);
|
||||
@ -712,18 +714,15 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
|
||||
* Creates vhost shadow virtqueue, and instructs the vhost device to use the
|
||||
* shadow methods and file descriptors.
|
||||
*
|
||||
* @iova_tree: Tree to perform descriptors translations
|
||||
* @ops: SVQ owner callbacks
|
||||
* @ops_opaque: ops opaque pointer
|
||||
*/
|
||||
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
|
||||
const VhostShadowVirtqueueOps *ops,
|
||||
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
|
||||
void *ops_opaque)
|
||||
{
|
||||
VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1);
|
||||
|
||||
event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
|
||||
svq->iova_tree = iova_tree;
|
||||
svq->ops = ops;
|
||||
svq->ops_opaque = ops_opaque;
|
||||
return svq;
|
||||
|
@ -126,11 +126,10 @@ size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
|
||||
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
|
||||
|
||||
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
|
||||
VirtQueue *vq);
|
||||
VirtQueue *vq, VhostIOVATree *iova_tree);
|
||||
void vhost_svq_stop(VhostShadowVirtqueue *svq);
|
||||
|
||||
VhostShadowVirtqueue *vhost_svq_new(VhostIOVATree *iova_tree,
|
||||
const VhostShadowVirtqueueOps *ops,
|
||||
VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
|
||||
void *ops_opaque);
|
||||
|
||||
void vhost_svq_free(gpointer vq);
|
||||
|
@ -430,8 +430,7 @@ static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
|
||||
for (unsigned n = 0; n < hdev->nvqs; ++n) {
|
||||
VhostShadowVirtqueue *svq;
|
||||
|
||||
svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
|
||||
v->shadow_vq_ops_opaque);
|
||||
svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
|
||||
g_ptr_array_add(shadow_vqs, svq);
|
||||
}
|
||||
|
||||
@ -1063,7 +1062,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
|
||||
goto err;
|
||||
}
|
||||
|
||||
vhost_svq_start(svq, dev->vdev, vq);
|
||||
vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
|
||||
ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
|
||||
if (unlikely(!ok)) {
|
||||
goto err_map;
|
||||
|
Loading…
Reference in New Issue
Block a user