virtio-scsi: don't waste CPU polling the event virtqueue

The virtio-scsi event virtqueue is not emptied by its handler function.
This is typical for rx virtqueues where the device uses buffers when
some event occurs (e.g. a packet is received, an error condition
happens, etc.).
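
For illustration only (this sketch is not part of the patch and is not the
actual virtio-scsi code; the handler names are made up, while
virtqueue_pop()/virtqueue_push()/virtio_notify() are the generic QEMU
virtqueue helpers), compare a command-style handler, which drains its queue,
with an event/rx-style handler, which leaves the guest's buffers queued until
the device has something to report:

  /* Command-style handler: pops every available element, so the queue is
   * empty afterwards and polling it for new work is worthwhile. */
  static void handle_cmd_vq(VirtIODevice *vdev, VirtQueue *vq)
  {
      VirtQueueElement *elem;

      while ((elem = virtqueue_pop(vq, sizeof(*elem)))) {
          /* ... process the request ... */
          virtqueue_push(vq, elem, 0);
          g_free(elem);
      }
      virtio_notify(vdev, vq);
  }

  /* Event/rx-style handler: nothing is popped here; the buffers remain in
   * the queue until an event occurs elsewhere in the device, so the queue
   * stays non-empty most of the time. */
  static void handle_event_vq(VirtIODevice *vdev, VirtQueue *vq)
  {
      /* no work to do until the device has an event to report */
  }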

Polling non-empty virtqueues wastes CPU cycles. We are not waiting for
new buffers to become available; we are waiting for an event to occur,
so polling for buffers is a misuse of CPU resources.

Introduce the new virtio_queue_aio_attach_host_notifier_no_poll() API,
which is identical to virtio_queue_aio_attach_host_notifier() except
that it does not poll the virtqueue.
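
For example, a device's dataplane start path would then attach its queues as
follows (an illustrative sketch that simply mirrors the virtio-scsi change in
the diff below; all names come from that hunk):

  /* Control and command queues are drained by their handlers, so polling
   * them is useful. */
  virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
  for (i = 0; i < vs->conf.num_queues; i++) {
      virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);
  }

  /* The event queue keeps its buffers until an event is reported, so attach
   * it without polling to avoid spinning on a permanently non-empty queue. */
  virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);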

Before this patch the following command line caused the IOThread to consume
100% CPU, continuously polling and calling virtio_scsi_handle_event():

  $ qemu-system-x86_64 -M accel=kvm -m 1G -cpu host \
      --object iothread,id=iothread0 \
      --device virtio-scsi-pci,iothread=iothread0 \
      --blockdev file,filename=test.img,aio=native,cache.direct=on,node-name=drive0 \
      --device scsi-hd,drive=drive0

After this patch CPU time is no longer wasted.
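
(One way to observe this, not part of the original report: watch per-thread
CPU usage on the host, for example with

  $ top -H -p $(pidof qemu-system-x86_64)

Before the patch the IOThread stays near 100% CPU even while the guest is
idle; after the patch it only wakes up when there is actual work to do.)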

Reported-by: Nir Soffer <nsoffer@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Nir Soffer <nsoffer@redhat.com>
Message-id: 20220427143541.119567-3-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi, 2022-04-27 15:35:37 +01:00
commit 38738f7dbb (parent 2f743ef636)
3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
@@ -138,7 +138,7 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev)
     aio_context_acquire(s->ctx);
     virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx);
-    virtio_queue_aio_attach_host_notifier(vs->event_vq, s->ctx);
+    virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx);
     for (i = 0; i < vs->conf.num_queues; i++) {
         virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx);

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
@@ -3534,6 +3534,19 @@ void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx)
                                 virtio_queue_host_notifier_aio_poll_end);
 }
 
+/*
+ * Same as virtio_queue_aio_attach_host_notifier() but without polling. Use
+ * this for rx virtqueues and similar cases where the virtqueue handler
+ * function does not pop all elements. When the virtqueue is left non-empty
+ * polling consumes CPU cycles and should not be used.
+ */
+void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx)
+{
+    aio_set_event_notifier(ctx, &vq->host_notifier, true,
+                           virtio_queue_host_notifier_read,
+                           NULL, NULL);
+}
+
 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx)
 {
     aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL, NULL);

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
@@ -317,6 +317,7 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
 void virtio_queue_host_notifier_read(EventNotifier *n);
 void virtio_queue_aio_attach_host_notifier(VirtQueue *vq, AioContext *ctx);
+void virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ctx);
 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);