From 05ba3f63d174a716b42aa31c9af5d0ef64fff515 Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Wed, 20 Oct 2021 12:55:56 +0800
Subject: [PATCH] vhost-net: control virtqueue support

In the past we assumed there was no cvq, but this is no longer true
when we need control virtqueue support for vhost-user backends. So
this patch implements control virtqueue support for vhost-net. Like
the datapath queues, the control virtqueue is also required to be
coupled with a NetClientState. vhost_net_start()/stop() are tweaked
to accept the number of datapath queue pairs plus the number of
control virtqueues for us to start and stop the vhost device.

Signed-off-by: Jason Wang
Message-Id: <20211020045600.16082-7-jasowang@redhat.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/net/vhost_net-stub.c |  4 ++--
 hw/net/vhost_net.c      | 43 ++++++++++++++++++++++++++++++-----------
 hw/net/virtio-net.c     |  4 ++--
 include/net/vhost_net.h |  6 ++++--
 4 files changed, 40 insertions(+), 17 deletions(-)

diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c
index a7f4252630..89d71cfb8e 100644
--- a/hw/net/vhost_net-stub.c
+++ b/hw/net/vhost_net-stub.c
@@ -33,13 +33,13 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
 int vhost_net_start(VirtIODevice *dev,
                     NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
     return -ENOSYS;
 }
 
 void vhost_net_stop(VirtIODevice *dev,
                     NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
 }
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 386ec2eaa2..e1e9d1ec89 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -315,11 +315,14 @@ static void vhost_net_stop_one(struct vhost_net *net,
 }
 
 int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    int total_notifiers = data_queue_pairs * 2 + cvq;
+    VirtIONet *n = VIRTIO_NET(dev);
+    int nvhosts = data_queue_pairs + cvq;
     struct vhost_net *net;
     int r, e, i;
     NetClientState *peer;
@@ -329,9 +332,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         return -ENOSYS;
     }
 
-    for (i = 0; i < total_queues; i++) {
+    for (i = 0; i < nvhosts; i++) {
+
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else { /* Control Virtqueue */
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
 
-        peer = qemu_get_peer(ncs, i);
         net = get_vhost_net(peer);
         vhost_net_set_vq_index(net, i * 2);
 
@@ -344,14 +352,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
         }
     }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
         goto err;
     }
 
-    for (i = 0; i < total_queues; i++) {
-        peer = qemu_get_peer(ncs, i);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
         r = vhost_net_start_one(get_vhost_net(peer), dev);
 
         if (r < 0) {
@@ -375,7 +387,7 @@ err_start:
         peer = qemu_get_peer(ncs , i);
         vhost_net_stop_one(get_vhost_net(peer), dev);
     }
-    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    e = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (e < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
         fflush(stderr);
@@ -385,18 +397,27 @@ err:
 }
 
 void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
-                    int total_queues)
+                    int data_queue_pairs, int cvq)
 {
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
     VirtioBusState *vbus = VIRTIO_BUS(qbus);
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
+    VirtIONet *n = VIRTIO_NET(dev);
+    NetClientState *peer;
+    int total_notifiers = data_queue_pairs * 2 + cvq;
+    int nvhosts = data_queue_pairs + cvq;
     int i, r;
 
-    for (i = 0; i < total_queues; i++) {
-        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
+    for (i = 0; i < nvhosts; i++) {
+        if (i < data_queue_pairs) {
+            peer = qemu_get_peer(ncs, i);
+        } else {
+            peer = qemu_get_peer(ncs, n->max_queues);
+        }
+
+        vhost_net_stop_one(get_vhost_net(peer), dev);
     }
 
-    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
+    r = k->set_guest_notifiers(qbus->parent, total_notifiers, false);
     if (r < 0) {
         fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
         fflush(stderr);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 3dd2896ff9..5ee6729662 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -285,14 +285,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
         }
 
         n->vhost_started = 1;
-        r = vhost_net_start(vdev, n->nic->ncs, queues);
+        r = vhost_net_start(vdev, n->nic->ncs, queues, 0);
         if (r < 0) {
             error_report("unable to start vhost net: %d: "
                          "falling back on userspace virtio", -r);
             n->vhost_started = 0;
         }
     } else {
-        vhost_net_stop(vdev, n->nic->ncs, queues);
+        vhost_net_stop(vdev, n->nic->ncs, queues, 0);
         n->vhost_started = 0;
     }
 }
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index fba40cf695..387e913e4e 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -21,8 +21,10 @@ typedef struct VhostNetOptions {
 uint64_t vhost_net_get_max_queues(VHostNetState *net);
 
 struct vhost_net *vhost_net_init(VhostNetOptions *options);
 
-int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues);
-void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues);
+int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
+                    int data_queue_pairs, int cvq);
+void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
+                    int data_queue_pairs, int cvq);
 
 void vhost_net_cleanup(VHostNetState *net);
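
Editor's note (not part of the patch): the virtio-net hunk above still passes
cvq = 0. As a minimal sketch only, assuming QEMU's internal headers and that
the control virtqueue backend is the NetClientState at index n->max_queues
(matching the qemu_get_peer(ncs, n->max_queues) lookups in the vhost_net.c
hunks), a caller could later pass a control virtqueue roughly like this; the
helper name is hypothetical:

/* Illustration only: start vhost with an optional control virtqueue. */
static int example_start_with_cvq(VirtIODevice *vdev, VirtIONet *n, int queues)
{
    /* One control virtqueue iff the guest negotiated VIRTIO_NET_F_CTRL_VQ. */
    int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? 1 : 0;

    /* "queues" data queue pairs plus at most one cvq-backed vhost device. */
    return vhost_net_start(vdev, n->nic->ncs, queues, cvq);
}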