libvhost-user: support many virtqueues

Currently libvhost-user is hardcoded to at most 8 virtqueues.  The
device backend should decide the number of virtqueues, not
libvhost-user.  This is important for multiqueue device backends where
the guest driver needs an accurate number of virtqueues.

This change breaks libvhost-user and libvhost-user-glib API stability.
There is no stability guarantee yet, so make this change now and update
all in-tree library users.

This patch touches up vhost-user-blk, vhost-user-gpu, vhost-user-input,
vhost-user-scsi, and vhost-user-bridge.  If the device has a fixed
number of queues, that exact number is used.  Otherwise the previous
default of 8 virtqueues is used.

vu_init() and vug_init() can now fail if malloc() returns NULL.  I
considered aborting with an error in libvhost-user, but it should be safe
to instantiate new vhost-user instances at runtime without risk of
terminating the process.  Therefore callers now need to handle vu_init()
failure.
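
For example, a device backend that used to call vu_init() unconditionally
now has to check the return value.  The following is a minimal sketch; the
backend name, queue count, and error handling are illustrative only and not
taken from any in-tree program:

    #include <stdbool.h>
    #include <stdio.h>

    #include "libvhost-user.h"

    /* Hypothetical per-device queue count; real backends define their own. */
    #define EXAMPLE_MAX_QUEUES 8

    static bool example_backend_start(VuDev *dev, int socket,
                                      vu_panic_cb panic,
                                      vu_set_watch_cb set_watch,
                                      vu_remove_watch_cb remove_watch,
                                      const VuDevIface *iface)
    {
        /* vu_init() now allocates the virtqueue array and returns false
         * if that allocation fails, so the result must be checked. */
        if (!vu_init(dev, EXAMPLE_MAX_QUEUES, socket, panic,
                     set_watch, remove_watch, iface)) {
            fprintf(stderr, "vu_init() failed\n");
            return false; /* let the caller decide how to recover */
        }

        return true;
    }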

vhost-user-blk and vhost-user-scsi duplicate virtqueue index checks that
are already performed by libvhost-user.  This code could have been modified
to use max_queues, but it is removed completely instead since it is
redundant.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20190626074815.19994-3-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Stefan Hajnoczi, 2019-06-26 08:48:13 +01:00; committed by Michael S. Tsirkin
commit 6f5fd83788 (parent db68f4ff06)
9 changed files with 105 additions and 51 deletions

libvhost-user-glib.c:

@@ -131,18 +131,24 @@ static void vug_watch(VuDev *dev, int condition, void *data)
     }
 }
 
-void
-vug_init(VugDev *dev, int socket,
+bool
+vug_init(VugDev *dev, uint16_t max_queues, int socket,
          vu_panic_cb panic, const VuDevIface *iface)
 {
     g_assert(dev);
     g_assert(iface);
 
-    vu_init(&dev->parent, socket, panic, set_watch, remove_watch, iface);
+    if (!vu_init(&dev->parent, max_queues, socket, panic, set_watch,
+                 remove_watch, iface)) {
+        return false;
+    }
 
     dev->fdmap = g_hash_table_new_full(NULL, NULL, NULL,
                                        (GDestroyNotify) g_source_destroy);
 
     dev->src = vug_source_new(dev, socket, G_IO_IN, vug_watch, NULL);
+
+    return true;
 }
 
 void

libvhost-user-glib.h:

@@ -25,7 +25,7 @@ typedef struct VugDev {
     GSource *src;
 } VugDev;
 
-void vug_init(VugDev *dev, int socket,
+bool vug_init(VugDev *dev, uint16_t max_queues, int socket,
               vu_panic_cb panic, const VuDevIface *iface);
 void vug_deinit(VugDev *dev);

libvhost-user.c:

@@ -493,9 +493,9 @@ vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 static void
 vu_set_enable_all_rings(VuDev *dev, bool enabled)
 {
-    int i;
+    uint16_t i;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    for (i = 0; i < dev->max_queues; i++) {
         dev->vq[i].enable = enabled;
     }
 }
@@ -916,7 +916,7 @@ vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
 {
     int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 
-    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+    if (index >= dev->max_queues) {
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Invalid queue index: %u", index);
         return false;
@@ -1213,7 +1213,7 @@ vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
     DPRINT("State.index: %d\n", index);
     DPRINT("State.enable: %d\n", enable);
 
-    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+    if (index >= dev->max_queues) {
         vu_panic(dev, "Invalid vring_enable index: %u", index);
         return false;
     }
@@ -1582,7 +1582,7 @@ vu_deinit(VuDev *dev)
     }
     dev->nregions = 0;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    for (i = 0; i < dev->max_queues; i++) {
         VuVirtq *vq = &dev->vq[i];
 
         if (vq->call_fd != -1) {
@@ -1627,18 +1627,23 @@ vu_deinit(VuDev *dev)
     if (dev->sock != -1) {
         close(dev->sock);
     }
+
+    free(dev->vq);
+    dev->vq = NULL;
 }
 
-void
+bool
 vu_init(VuDev *dev,
+        uint16_t max_queues,
         int socket,
         vu_panic_cb panic,
         vu_set_watch_cb set_watch,
         vu_remove_watch_cb remove_watch,
         const VuDevIface *iface)
 {
-    int i;
+    uint16_t i;
 
+    assert(max_queues > 0);
     assert(socket >= 0);
     assert(set_watch);
     assert(remove_watch);
@@ -1654,18 +1659,28 @@ vu_init(VuDev *dev,
     dev->iface = iface;
     dev->log_call_fd = -1;
     dev->slave_fd = -1;
+    dev->max_queues = max_queues;
 
-    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
+    if (!dev->vq) {
+        DPRINT("%s: failed to malloc virtqueues\n", __func__);
+        return false;
+    }
+
+    for (i = 0; i < max_queues; i++) {
         dev->vq[i] = (VuVirtq) {
             .call_fd = -1, .kick_fd = -1, .err_fd = -1,
             .notification = true,
         };
     }
+
+    return true;
 }
 
 VuVirtq *
 vu_get_queue(VuDev *dev, int qidx)
 {
-    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
+    assert(qidx < dev->max_queues);
     return &dev->vq[qidx];
 }

libvhost-user.h:

@@ -25,7 +25,6 @@
 #define VHOST_USER_F_PROTOCOL_FEATURES 30
 #define VHOST_LOG_PAGE 4096
 
-#define VHOST_MAX_NR_VIRTQUEUE 8
 #define VIRTQUEUE_MAX_SIZE 1024
 
 #define VHOST_MEMORY_MAX_NREGIONS 8
@@ -353,7 +352,7 @@ struct VuDev {
     int sock;
     uint32_t nregions;
     VuDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
-    VuVirtq vq[VHOST_MAX_NR_VIRTQUEUE];
+    VuVirtq *vq;
     VuDevInflightInfo inflight_info;
     int log_call_fd;
     int slave_fd;
@@ -362,6 +361,7 @@ struct VuDev {
     uint64_t features;
     uint64_t protocol_features;
     bool broken;
+    uint16_t max_queues;
 
     /* @set_watch: add or update the given fd to the watch set,
      * call cb when condition is met */
@@ -391,6 +391,7 @@ typedef struct VuVirtqElement {
 /**
  * vu_init:
  * @dev: a VuDev context
+ * @max_queues: maximum number of virtqueues
  * @socket: the socket connected to vhost-user master
  * @panic: a panic callback
  * @set_watch: a set_watch callback
@@ -398,8 +399,11 @@ typedef struct VuVirtqElement {
  * @iface: a VuDevIface structure with vhost-user device callbacks
  *
  * Intializes a VuDev vhost-user context.
+ *
+ * Returns: true on success, false on failure.
  **/
-void vu_init(VuDev *dev,
+bool vu_init(VuDev *dev,
+             uint16_t max_queues,
              int socket,
              vu_panic_cb panic,
              vu_set_watch_cb set_watch,

vhost-user-blk.c:

@@ -25,6 +25,10 @@
 #include <sys/ioctl.h>
 #endif
 
+enum {
+    VHOST_USER_BLK_MAX_QUEUES = 8,
+};
+
 struct virtio_blk_inhdr {
     unsigned char status;
 };
@@ -334,12 +338,6 @@ static void vub_process_vq(VuDev *vu_dev, int idx)
     VuVirtq *vq;
     int ret;
 
-    if ((idx < 0) || (idx >= VHOST_MAX_NR_VIRTQUEUE)) {
-        fprintf(stderr, "VQ Index out of range: %d\n", idx);
-        vub_panic_cb(vu_dev, NULL);
-        return;
-    }
-
     gdev = container_of(vu_dev, VugDev, parent);
     vdev_blk = container_of(gdev, VubDev, parent);
     assert(vdev_blk);
@@ -631,7 +629,11 @@ int main(int argc, char **argv)
         vdev_blk->enable_ro = true;
     }
 
-    vug_init(&vdev_blk->parent, csock, vub_panic_cb, &vub_iface);
+    if (!vug_init(&vdev_blk->parent, VHOST_USER_BLK_MAX_QUEUES, csock,
+                  vub_panic_cb, &vub_iface)) {
+        fprintf(stderr, "Failed to initialized libvhost-user-glib\n");
+        goto err;
+    }
 
     g_main_loop_run(vdev_blk->loop);

vhost-user-gpu:

@@ -25,6 +25,10 @@
 #include "virgl.h"
 #include "vugbm.h"
 
+enum {
+    VHOST_USER_GPU_MAX_QUEUES = 2,
+};
+
 struct virtio_gpu_simple_resource {
     uint32_t resource_id;
     uint32_t width;
@@ -1169,7 +1173,10 @@ main(int argc, char *argv[])
         exit(EXIT_FAILURE);
     }
 
-    vug_init(&g.dev, fd, vg_panic, &vuiface);
+    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
+        g_printerr("Failed to initialize libvhost-user-glib.\n");
+        exit(EXIT_FAILURE);
+    }
 
     loop = g_main_loop_new(NULL, FALSE);
     g_main_loop_run(loop);

vhost-user-input:

@@ -17,6 +17,10 @@
 #include "standard-headers/linux/virtio_input.h"
 #include "qapi/error.h"
 
+enum {
+    VHOST_USER_INPUT_MAX_QUEUES = 2,
+};
+
 typedef struct virtio_input_event virtio_input_event;
 typedef struct virtio_input_config virtio_input_config;
@@ -384,7 +388,12 @@ main(int argc, char *argv[])
         g_printerr("Invalid vhost-user socket.\n");
         exit(EXIT_FAILURE);
     }
-    vug_init(&vi.dev, fd, vi_panic, &vuiface);
+
+    if (!vug_init(&vi.dev, VHOST_USER_INPUT_MAX_QUEUES, fd, vi_panic,
+                  &vuiface)) {
+        g_printerr("Failed to initialize libvhost-user-glib.\n");
+        exit(EXIT_FAILURE);
+    }
 
     loop = g_main_loop_new(NULL, FALSE);
     g_main_loop_run(loop);

vhost-user-scsi.c:

@@ -19,6 +19,10 @@
 
 #define VUS_ISCSI_INITIATOR "iqn.2016-11.com.nutanix:vhost-user-scsi"
 
+enum {
+    VHOST_USER_SCSI_MAX_QUEUES = 8,
+};
+
 typedef struct VusIscsiLun {
     struct iscsi_context *iscsi_ctx;
     int iscsi_lun;
@@ -231,11 +235,6 @@ static void vus_proc_req(VuDev *vu_dev, int idx)
     gdev = container_of(vu_dev, VugDev, parent);
     vdev_scsi = container_of(gdev, VusDev, parent);
 
-    if (idx < 0 || idx >= VHOST_MAX_NR_VIRTQUEUE) {
-        g_warning("VQ Index out of range: %d", idx);
-        vus_panic_cb(vu_dev, NULL);
-        return;
-    }
-
     vq = vu_get_queue(vu_dev, idx);
     if (!vq) {
@@ -295,12 +294,6 @@ static void vus_queue_set_started(VuDev *vu_dev, int idx, bool started)
 
     assert(vu_dev);
 
-    if (idx < 0 || idx >= VHOST_MAX_NR_VIRTQUEUE) {
-        g_warning("VQ Index out of range: %d", idx);
-        vus_panic_cb(vu_dev, NULL);
-        return;
-    }
-
     vq = vu_get_queue(vu_dev, idx);
 
     if (idx == 0 || idx == 1) {
@@ -398,7 +391,11 @@ int main(int argc, char **argv)
         goto err;
     }
 
-    vug_init(&vdev_scsi->parent, csock, vus_panic_cb, &vus_iface);
+    if (!vug_init(&vdev_scsi->parent, VHOST_USER_SCSI_MAX_QUEUES, csock,
+                  vus_panic_cb, &vus_iface)) {
+        g_printerr("Failed to initialize libvhost-user-glib\n");
+        goto err;
+    }
 
     g_main_loop_run(vdev_scsi->loop);

vhost-user-bridge.c:

@@ -45,6 +45,10 @@
         } \
     } while (0)
 
+enum {
+    VHOST_USER_BRIDGE_MAX_QUEUES = 8,
+};
+
 typedef void (*CallbackFunc)(int sock, void *ctx);
 
 typedef struct Event {
@@ -512,12 +516,16 @@ vubr_accept_cb(int sock, void *ctx)
     }
     DPRINT("Got connection from remote peer on sock %d\n", conn_fd);
 
-    vu_init(&dev->vudev,
-            conn_fd,
-            vubr_panic,
-            vubr_set_watch,
-            vubr_remove_watch,
-            &vuiface);
+    if (!vu_init(&dev->vudev,
+                 VHOST_USER_BRIDGE_MAX_QUEUES,
+                 conn_fd,
+                 vubr_panic,
+                 vubr_set_watch,
+                 vubr_remove_watch,
+                 &vuiface)) {
+        fprintf(stderr, "Failed to initialize libvhost-user\n");
+        exit(1);
+    }
 
     dispatcher_add(&dev->dispatcher, conn_fd, ctx, vubr_receive_cb);
     dispatcher_remove(&dev->dispatcher, sock);
@@ -560,12 +568,18 @@ vubr_new(const char *path, bool client)
         if (connect(dev->sock, (struct sockaddr *)&un, len) == -1) {
             vubr_die("connect");
         }
-        vu_init(&dev->vudev,
-                dev->sock,
-                vubr_panic,
-                vubr_set_watch,
-                vubr_remove_watch,
-                &vuiface);
+
+        if (!vu_init(&dev->vudev,
+                     VHOST_USER_BRIDGE_MAX_QUEUES,
+                     dev->sock,
+                     vubr_panic,
+                     vubr_set_watch,
+                     vubr_remove_watch,
+                     &vuiface)) {
+            fprintf(stderr, "Failed to initialize libvhost-user\n");
+            exit(1);
+        }
+
         cb = vubr_receive_cb;
     }
@@ -584,7 +598,7 @@ static void *notifier_thread(void *arg)
     int qidx;
 
     while (true) {
-        for (qidx = 0; qidx < VHOST_MAX_NR_VIRTQUEUE; qidx++) {
+        for (qidx = 0; qidx < VHOST_USER_BRIDGE_MAX_QUEUES; qidx++) {
             uint16_t *n = vubr->notifier.addr + pagesize * qidx;
 
             if (*n == qidx) {
@@ -616,7 +630,7 @@ vubr_host_notifier_setup(VubrDev *dev)
     void *addr;
     int fd;
 
-    length = getpagesize() * VHOST_MAX_NR_VIRTQUEUE;
+    length = getpagesize() * VHOST_USER_BRIDGE_MAX_QUEUES;
 
     fd = mkstemp(template);
     if (fd < 0) {