virtio-blk: add iothread-vq-mapping parameter

Add the iothread-vq-mapping parameter to assign virtqueues to IOThreads.
Store the vq:AioContext mapping in the new struct
VirtIOBlockDataPlane->vq_aio_context[] field and refactor the code to
use the per-vq AioContext instead of the BlockDriverState's AioContext.

Reimplement --device virtio-blk-pci,iothread= and non-IOThread mode by
assigning all virtqueues to the IOThread and main loop's AioContext in
vq_aio_context[], respectively.

The comment in struct VirtIOBlockDataPlane about EventNotifiers is
stale. Remove it.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20231220134755.814917-5-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
Stefan Hajnoczi 2023-12-20 08:47:55 -05:00 committed by Kevin Wolf
parent cf03a152c5
commit b6948ab01d
4 changed files with 202 additions and 50 deletions

View File

@ -32,13 +32,11 @@ struct VirtIOBlockDataPlane {
VirtIOBlkConf *conf; VirtIOBlkConf *conf;
VirtIODevice *vdev; VirtIODevice *vdev;
/* Note that these EventNotifiers are assigned by value. This is /*
* fine as long as you do not call event_notifier_cleanup on them * The AioContext for each virtqueue. The BlockDriverState will use the
* (because you don't own the file descriptor or handle; you just * first element as its AioContext.
* use it).
*/ */
IOThread *iothread; AioContext **vq_aio_context;
AioContext *ctx;
}; };
/* Raise an interrupt to signal guest, if necessary */ /* Raise an interrupt to signal guest, if necessary */
@ -47,6 +45,45 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
virtio_notify_irqfd(s->vdev, vq); virtio_notify_irqfd(s->vdev, vq);
} }
/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
static void
apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
AioContext **vq_aio_context, uint16_t num_queues)
{
IOThreadVirtQueueMappingList *node;
size_t num_iothreads = 0;
size_t cur_iothread = 0;
/* First pass: count the IOThreads so round-robin assignment knows the stride */
for (node = iothread_vq_mapping_list; node; node = node->next) {
num_iothreads++;
}
/*
 * Second pass: fill vq_aio_context[] with one AioContext per virtqueue.
 * The list must already have passed validate_iothread_vq_mapping_list(),
 * so iothread_by_id() is expected to succeed and either every entry has
 * an explicit vqs list or none does — TODO confirm callers enforce this.
 */
for (node = iothread_vq_mapping_list; node; node = node->next) {
IOThread *iothread = iothread_by_id(node->value->iothread);
AioContext *ctx = iothread_get_aio_context(iothread);
/* Released in virtio_blk_data_plane_destroy() */
object_ref(OBJECT(iothread));
if (node->value->vqs) {
uint16List *vq;
/* Explicit vq:IOThread assignment */
for (vq = node->value->vqs; vq; vq = vq->next) {
vq_aio_context[vq->value] = ctx;
}
} else {
/* Round-robin vq:IOThread assignment */
for (unsigned i = cur_iothread; i < num_queues;
i += num_iothreads) {
vq_aio_context[i] = ctx;
}
}
cur_iothread++;
}
}
/* Context: QEMU global mutex held */ /* Context: QEMU global mutex held */
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane, VirtIOBlockDataPlane **dataplane,
@ -58,7 +95,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
*dataplane = NULL; *dataplane = NULL;
if (conf->iothread) { if (conf->iothread || conf->iothread_vq_mapping_list) {
if (!k->set_guest_notifiers || !k->ioeventfd_assign) { if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
error_setg(errp, error_setg(errp,
"device is incompatible with iothread " "device is incompatible with iothread "
@ -86,13 +123,24 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
s = g_new0(VirtIOBlockDataPlane, 1); s = g_new0(VirtIOBlockDataPlane, 1);
s->vdev = vdev; s->vdev = vdev;
s->conf = conf; s->conf = conf;
s->vq_aio_context = g_new(AioContext *, conf->num_queues);
if (conf->iothread) { if (conf->iothread_vq_mapping_list) {
s->iothread = conf->iothread; apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
object_ref(OBJECT(s->iothread)); conf->num_queues);
s->ctx = iothread_get_aio_context(s->iothread); } else if (conf->iothread) {
AioContext *ctx = iothread_get_aio_context(conf->iothread);
for (unsigned i = 0; i < conf->num_queues; i++) {
s->vq_aio_context[i] = ctx;
}
/* Released in virtio_blk_data_plane_destroy() */
object_ref(OBJECT(conf->iothread));
} else { } else {
s->ctx = qemu_get_aio_context(); AioContext *ctx = qemu_get_aio_context();
for (unsigned i = 0; i < conf->num_queues; i++) {
s->vq_aio_context[i] = ctx;
}
} }
*dataplane = s; *dataplane = s;
@ -104,6 +152,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{ {
VirtIOBlock *vblk; VirtIOBlock *vblk;
VirtIOBlkConf *conf = s->conf;
if (!s) { if (!s) {
return; return;
@ -111,9 +160,21 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
vblk = VIRTIO_BLK(s->vdev); vblk = VIRTIO_BLK(s->vdev);
assert(!vblk->dataplane_started); assert(!vblk->dataplane_started);
if (s->iothread) {
object_unref(OBJECT(s->iothread)); if (conf->iothread_vq_mapping_list) {
IOThreadVirtQueueMappingList *node;
for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
IOThread *iothread = iothread_by_id(node->value->iothread);
object_unref(OBJECT(iothread));
} }
}
if (conf->iothread) {
object_unref(OBJECT(conf->iothread));
}
g_free(s->vq_aio_context);
g_free(s); g_free(s);
} }
@ -177,19 +238,13 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
trace_virtio_blk_data_plane_start(s); trace_virtio_blk_data_plane_start(s);
r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err); r = blk_set_aio_context(s->conf->conf.blk, s->vq_aio_context[0],
&local_err);
if (r < 0) { if (r < 0) {
error_report_err(local_err); error_report_err(local_err);
goto fail_aio_context; goto fail_aio_context;
} }
/* Kick right away to begin processing requests already in vring */
for (i = 0; i < nvqs; i++) {
VirtQueue *vq = virtio_get_queue(s->vdev, i);
event_notifier_set(virtio_queue_get_host_notifier(vq));
}
/* /*
* These fields must be visible to the IOThread when it processes the * These fields must be visible to the IOThread when it processes the
* virtqueue, otherwise it will think dataplane has not started yet. * virtqueue, otherwise it will think dataplane has not started yet.
@ -206,8 +261,12 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
if (!blk_in_drain(s->conf->conf.blk)) { if (!blk_in_drain(s->conf->conf.blk)) {
for (i = 0; i < nvqs; i++) { for (i = 0; i < nvqs; i++) {
VirtQueue *vq = virtio_get_queue(s->vdev, i); VirtQueue *vq = virtio_get_queue(s->vdev, i);
AioContext *ctx = s->vq_aio_context[i];
virtio_queue_aio_attach_host_notifier(vq, s->ctx); /* Kick right away to begin processing requests already in vring */
event_notifier_set(virtio_queue_get_host_notifier(vq));
virtio_queue_aio_attach_host_notifier(vq, ctx);
} }
} }
return 0; return 0;
@ -236,16 +295,12 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev)
* *
* Context: BH in IOThread * Context: BH in IOThread
*/ */
static void virtio_blk_data_plane_stop_bh(void *opaque) static void virtio_blk_data_plane_stop_vq_bh(void *opaque)
{ {
VirtIOBlockDataPlane *s = opaque; VirtQueue *vq = opaque;
unsigned i;
for (i = 0; i < s->conf->num_queues; i++) {
VirtQueue *vq = virtio_get_queue(s->vdev, i);
EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
virtio_queue_aio_detach_host_notifier(vq, s->ctx); virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());
/* /*
* Test and clear notifier after disabling event, in case poll callback * Test and clear notifier after disabling event, in case poll callback
@ -253,7 +308,6 @@ static void virtio_blk_data_plane_stop_bh(void *opaque)
*/ */
virtio_queue_host_notifier_read(host_notifier); virtio_queue_host_notifier_read(host_notifier);
} }
}
/* Context: QEMU global mutex held */ /* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIODevice *vdev) void virtio_blk_data_plane_stop(VirtIODevice *vdev)
@ -279,7 +333,12 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
trace_virtio_blk_data_plane_stop(s); trace_virtio_blk_data_plane_stop(s);
if (!blk_in_drain(s->conf->conf.blk)) { if (!blk_in_drain(s->conf->conf.blk)) {
aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); for (i = 0; i < nvqs; i++) {
VirtQueue *vq = virtio_get_queue(s->vdev, i);
AioContext *ctx = s->vq_aio_context[i];
aio_wait_bh_oneshot(ctx, virtio_blk_data_plane_stop_vq_bh, vq);
}
} }
/* /*
@ -322,3 +381,23 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev)
s->stopping = false; s->stopping = false;
} }
/*
 * Stop ioeventfd processing for every virtqueue by detaching each host
 * notifier from its assigned per-vq AioContext (e.g. around a drained
 * section — see virtio_blk_drained_begin() in this patch).
 */
void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s)
{
VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev);
for (uint16_t i = 0; i < s->conf->num_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
}
}
/*
 * Resume ioeventfd processing by re-attaching each virtqueue's host
 * notifier to its assigned per-vq AioContext. Counterpart of
 * virtio_blk_data_plane_detach().
 */
void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s)
{
VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev);
for (uint16_t i = 0; i < s->conf->num_queues; i++) {
VirtQueue *vq = virtio_get_queue(vdev, i);
virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
}
}

View File

@ -28,4 +28,7 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
int virtio_blk_data_plane_start(VirtIODevice *vdev); int virtio_blk_data_plane_start(VirtIODevice *vdev);
void virtio_blk_data_plane_stop(VirtIODevice *vdev); void virtio_blk_data_plane_stop(VirtIODevice *vdev);
void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s);
#endif /* HW_DATAPLANE_VIRTIO_BLK_H */ #endif /* HW_DATAPLANE_VIRTIO_BLK_H */

View File

@ -1151,6 +1151,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
return; return;
} }
} }
virtio_blk_handle_vq(s, vq); virtio_blk_handle_vq(s, vq);
} }
@ -1463,6 +1464,68 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
return 0; return 0;
} }
/*
 * Check that an iothread-vq-mapping list is well-formed before it is
 * applied:
 *   - every named IOThread object exists and appears at most once;
 *   - either all entries carry an explicit vqs list or none does;
 *   - explicit vq indices are < num_queues and not assigned twice;
 *   - with explicit assignment, every vq 0..num_queues-1 is covered.
 *
 * Returns true on success; on failure sets *errp and returns false.
 */
static bool
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
uint16_t num_queues, Error **errp)
{
/* Bitmap of vq indices seen so far; detects duplicates and gaps */
g_autofree unsigned long *vqs = bitmap_new(num_queues);
/* Set of IOThread names seen so far; detects duplicate entries */
g_autoptr(GHashTable) iothreads =
g_hash_table_new(g_str_hash, g_str_equal);
for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
const char *name = node->value->iothread;
uint16List *vq;
if (!iothread_by_id(name)) {
error_setg(errp, "IOThread \"%s\" object does not exist", name);
return false;
}
/* g_hash_table_add() returns FALSE if the key was already present */
if (!g_hash_table_add(iothreads, (gpointer)name)) {
error_setg(errp,
"duplicate IOThread name \"%s\" in iothread-vq-mapping",
name);
return false;
}
/* All entries must agree with the first on explicit-vs-automatic vqs */
if (node != list) {
if (!!node->value->vqs != !!list->value->vqs) {
error_setg(errp, "either all items in iothread-vq-mapping "
"must have vqs or none of them must have it");
return false;
}
}
for (vq = node->value->vqs; vq; vq = vq->next) {
if (vq->value >= num_queues) {
error_setg(errp, "vq index %u for IOThread \"%s\" must be "
"less than num_queues %u in iothread-vq-mapping",
vq->value, name, num_queues);
return false;
}
if (test_and_set_bit(vq->value, vqs)) {
error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
"because it is already assigned", vq->value, name);
return false;
}
}
}
/* With explicit assignment, the mapping must cover every virtqueue */
if (list->value->vqs) {
for (uint16_t i = 0; i < num_queues; i++) {
if (!test_bit(i, vqs)) {
error_setg(errp,
"missing vq %u IOThread assignment in iothread-vq-mapping",
i);
return false;
}
}
}
return true;
}
static void virtio_resize_cb(void *opaque) static void virtio_resize_cb(void *opaque)
{ {
VirtIODevice *vdev = opaque; VirtIODevice *vdev = opaque;
@ -1487,34 +1550,24 @@ static void virtio_blk_resize(void *opaque)
static void virtio_blk_drained_begin(void *opaque) static void virtio_blk_drained_begin(void *opaque)
{ {
VirtIOBlock *s = opaque; VirtIOBlock *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
if (!s->dataplane || !s->dataplane_started) { if (!s->dataplane || !s->dataplane_started) {
return; return;
} }
for (uint16_t i = 0; i < s->conf.num_queues; i++) { virtio_blk_data_plane_detach(s->dataplane);
VirtQueue *vq = virtio_get_queue(vdev, i);
virtio_queue_aio_detach_host_notifier(vq, ctx);
}
} }
/* Resume virtqueue ioeventfd processing after drain */ /* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque) static void virtio_blk_drained_end(void *opaque)
{ {
VirtIOBlock *s = opaque; VirtIOBlock *s = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
AioContext *ctx = blk_get_aio_context(s->conf.conf.blk);
if (!s->dataplane || !s->dataplane_started) { if (!s->dataplane || !s->dataplane_started) {
return; return;
} }
for (uint16_t i = 0; i < s->conf.num_queues; i++) { virtio_blk_data_plane_attach(s->dataplane);
VirtQueue *vq = virtio_get_queue(vdev, i);
virtio_queue_aio_attach_host_notifier(vq, ctx);
}
} }
static const BlockDevOps virtio_block_ops = { static const BlockDevOps virtio_block_ops = {
@ -1600,6 +1653,19 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
return; return;
} }
if (conf->iothread_vq_mapping_list) {
if (conf->iothread) {
error_setg(errp, "iothread and iothread-vq-mapping properties "
"cannot be set at the same time");
return;
}
if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
conf->num_queues, errp)) {
return;
}
}
s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
s->host_features); s->host_features);
virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size); virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
@ -1702,6 +1768,8 @@ static Property virtio_blk_properties[] = {
DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true), DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD, DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
IOThread *), IOThread *),
DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
conf.iothread_vq_mapping_list),
DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features, DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
VIRTIO_BLK_F_DISCARD, true), VIRTIO_BLK_F_DISCARD, true),
DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock, DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,

View File

@ -21,6 +21,7 @@
#include "sysemu/block-backend.h" #include "sysemu/block-backend.h"
#include "sysemu/block-ram-registrar.h" #include "sysemu/block-ram-registrar.h"
#include "qom/object.h" #include "qom/object.h"
#include "qapi/qapi-types-virtio.h"
#define TYPE_VIRTIO_BLK "virtio-blk-device" #define TYPE_VIRTIO_BLK "virtio-blk-device"
OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK) OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK)
@ -37,6 +38,7 @@ struct VirtIOBlkConf
{ {
BlockConf conf; BlockConf conf;
IOThread *iothread; IOThread *iothread;
IOThreadVirtQueueMappingList *iothread_vq_mapping_list;
char *serial; char *serial;
uint32_t request_merging; uint32_t request_merging;
uint16_t num_queues; uint16_t num_queues;