qemu-e2k/hw/block/dataplane/virtio-blk.c

/*
 * Dedicated thread for virtio-blk I/O processing
 *
 * Copyright 2012 IBM, Corp.
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/thread.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk.h"
#include "virtio-blk.h"
#include "block/aio.h"
#include "hw/virtio/virtio-bus.h"
#include "qom/object_interfaces.h"

struct VirtIOBlockDataPlane {
    bool starting;
    bool stopping;

    VirtIOBlkConf *conf;
    VirtIODevice *vdev;
    QEMUBH *bh;                     /* bh for guest notification */
    unsigned long *batch_notify_vqs;
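    /* Set at start time: batch guest notifications via the BH only when the
     * guest has not negotiated VIRTIO_RING_F_EVENT_IDX (with EVENT_IDX the
     * guest already suppresses interrupts it does not need).
     */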
    bool batch_notifications;

    /* Note that these EventNotifiers are assigned by value.  This is
     * fine as long as you do not call event_notifier_cleanup on them
     * (because you don't own the file descriptor or handle; you just
     * use it).
     */
    IOThread *iothread;
    AioContext *ctx;
};

/* Raise an interrupt to signal guest, if necessary */
void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
    if (s->batch_notifications) {
        set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
        qemu_bh_schedule(s->bh);
    } else {
        virtio_notify_irqfd(s->vdev, vq);
    }
}
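
/* BH callback: deliver all guest notifications batched up since the last run,
 * raising one irqfd signal per virtqueue that has pending completions.
 */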
static void notify_guest_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned nvqs = s->conf->num_queues;
    unsigned long bitmap[BITS_TO_LONGS(nvqs)];
    unsigned j;

    memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
    memset(s->batch_notify_vqs, 0, sizeof(bitmap));

    for (j = 0; j < nvqs; j += BITS_PER_LONG) {
        unsigned long bits = bitmap[j / BITS_PER_LONG];

        while (bits != 0) {
            unsigned i = j + ctzl(bits);
            VirtQueue *vq = virtio_get_queue(s->vdev, i);
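            /* virtio_notify_irqfd also sets ISR, for guest drivers that poll
             * ISR instead of using MSI (e.g. during crashdump/hibernation).
             */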
            virtio_notify_irqfd(s->vdev, vq);

            bits &= bits - 1; /* clear right-most bit */
        }
    }
}

/* Context: QEMU global mutex held */
bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
                                  VirtIOBlockDataPlane **dataplane,
                                  Error **errp)
{
    VirtIOBlockDataPlane *s;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    *dataplane = NULL;

    if (conf->iothread) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }

        /* If dataplane is (re-)enabled while the guest is running there could
         * be block jobs that can conflict.
         */
        if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
            error_prepend(errp, "cannot start virtio-blk dataplane: ");
            return false;
        }
    }
    /* Don't try if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        return false;
    }

    s = g_new0(VirtIOBlockDataPlane, 1);
    s->vdev = vdev;
    s->conf = conf;

    if (conf->iothread) {
        s->iothread = conf->iothread;
        object_ref(OBJECT(s->iothread));
        s->ctx = iothread_get_aio_context(s->iothread);
    } else {
        s->ctx = qemu_get_aio_context();
    }
    s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
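    /* One bit per virtqueue, set by virtio_blk_data_plane_notify when that
     * queue has completions waiting for a batched guest notification.
     */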
    s->batch_notify_vqs = bitmap_new(conf->num_queues);

    *dataplane = s;
    return true;
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
{
    VirtIOBlock *vblk;

    if (!s) {
        return;
    }

    vblk = VIRTIO_BLK(s->vdev);
    assert(!vblk->dataplane_started);
    g_free(s->batch_notify_vqs);
    qemu_bh_delete(s->bh);
    if (s->iothread) {
        object_unref(OBJECT(s->iothread));
    }
    g_free(s);
}
static bool virtio_blk_data_plane_handle_output(VirtIODevice *vdev,
                                                VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    assert(s->dataplane);
    assert(s->dataplane_started);

    return virtio_blk_handle_vq(s, vq);
}

/* Context: QEMU global mutex held */
int virtio_blk_data_plane_start(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;
    int r;

    if (vblk->dataplane_started || s->starting) {
        return 0;
    }

    s->starting = true;
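    /* With EVENT_IDX the guest suppresses notifications it does not need, so
     * deliver completions immediately; without it, fall back to batching
     * notifications in a BH to avoid flooding the guest with interrupts.
     */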
    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        s->batch_notifications = true;
    } else {
        s->batch_notifications = false;
    }

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
            }
            goto fail_guest_notifiers;
        }
    }

    s->starting = false;
    vblk->dataplane_started = true;
    trace_virtio_blk_data_plane_start(s);
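    /* Move the block backend into the dataplane AioContext so that request
     * completion callbacks run in the IOThread (or in the main loop when no
     * iothread was configured).
     */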
    blk_set_aio_context(s->conf->conf.blk, s->ctx);

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        event_notifier_set(virtio_queue_get_host_notifier(vq));
    }

    /* Get this show started by hooking up our callbacks */
    aio_context_acquire(s->ctx);
    for (i = 0; i < nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
                virtio_blk_data_plane_handle_output);
    }
    aio_context_release(s->ctx);
    return 0;

  fail_guest_notifiers:
    vblk->dataplane_disabled = true;
    s->starting = false;
    vblk->dataplane_started = true;
    return -ENOSYS;
}

/* Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_data_plane_stop_bh(void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    unsigned i;

    for (i = 0; i < s->conf->num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(s->vdev, i);

        virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
    }
}

/* Context: QEMU global mutex held */
void virtio_blk_data_plane_stop(VirtIODevice *vdev)
{
    VirtIOBlock *vblk = VIRTIO_BLK(vdev);
    VirtIOBlockDataPlane *s = vblk->dataplane;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vblk));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf->num_queues;

    if (!vblk->dataplane_started || s->stopping) {
        return;
    }

    /* Better luck next time. */
    if (vblk->dataplane_disabled) {
        vblk->dataplane_disabled = false;
        vblk->dataplane_started = false;
        return;
    }
    s->stopping = true;
    trace_virtio_blk_data_plane_stop(s);

    aio_context_acquire(s->ctx);
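    /* Run the stop BH in the IOThread and wait for it to finish, so that no
     * new guest kicks are picked up once we start draining.
     */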
    aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);

    /* Drain and switch bs back to the QEMU main loop */
    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());

    aio_context_release(s->ctx);

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    vblk->dataplane_started = false;
    s->stopping = false;
}