virtio: change set guest notifier to per-device

When using irqfd with vhost-net to inject interrupts,
a single eventfd might inject multiple interrupts.
Implementing this is much easier with a single
per-device callback to set guest notifiers.
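
In binding terms (see the hw/virtio.h hunk below), the per-queue hook
becomes a device-scoped one, so the transport can assign every queue's
notifier, and later a shared irqfd, in one call:

    /* Before: called once per virtqueue n. */
    int (*set_guest_notifier)(void *opaque, int n, bool assigned);

    /* After: called once per device; the transport walks its own VQs. */
    int (*set_guest_notifiers)(void *opaque, bool assigned);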

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author: Michael S. Tsirkin <mst@redhat.com>
Date:   2010-10-06 15:20:17 +02:00
Commit: 54dd932128
Parent: 010ec62934

3 changed files with 57 additions and 26 deletions

hw/vhost.c

@@ -454,11 +454,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
     };
     struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
 
-    if (!vdev->binding->set_guest_notifier) {
-        fprintf(stderr, "binding does not support guest notifiers\n");
-        return -ENOSYS;
-    }
-
     if (!vdev->binding->set_host_notifier) {
         fprintf(stderr, "binding does not support host notifiers\n");
         return -ENOSYS;
@@ -511,12 +506,6 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
         r = -errno;
         goto fail_alloc;
     }
-    r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, true);
-    if (r < 0) {
-        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
-        goto fail_guest_notifier;
-    }
-
     r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, true);
     if (r < 0) {
         fprintf(stderr, "Error binding host notifier: %d\n", -r);
@@ -541,8 +530,6 @@ fail_call:
 fail_kick:
     vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
 fail_host_notifier:
-    vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
-fail_guest_notifier:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                               0, 0);
@@ -568,13 +555,6 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
         .index = idx,
     };
     int r;
-    r = vdev->binding->set_guest_notifier(vdev->binding_opaque, idx, false);
-    if (r < 0) {
-        fprintf(stderr, "vhost VQ %d guest cleanup failed: %d\n", idx, r);
-        fflush(stderr);
-    }
-    assert (r >= 0);
-
     r = vdev->binding->set_host_notifier(vdev->binding_opaque, idx, false);
     if (r < 0) {
         fprintf(stderr, "vhost VQ %d host cleanup failed: %d\n", idx, r);
@@ -647,15 +627,26 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
     int i, r;
 
+    if (!vdev->binding->set_guest_notifiers) {
+        fprintf(stderr, "binding does not support guest notifiers\n");
+        r = -ENOSYS;
+        goto fail;
+    }
+
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, true);
+    if (r < 0) {
+        fprintf(stderr, "Error binding guest notifier: %d\n", -r);
+        goto fail_notifiers;
+    }
+
     r = vhost_dev_set_features(hdev, hdev->log_enabled);
     if (r < 0) {
-        goto fail;
+        goto fail_features;
     }
     r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
     if (r < 0) {
         r = -errno;
-        goto fail;
+        goto fail_mem;
     }
     for (i = 0; i < hdev->nvqs; ++i) {
         r = vhost_virtqueue_init(hdev,
@@ -675,13 +666,14 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
                   (uint64_t)(unsigned long)hdev->log);
         if (r < 0) {
             r = -errno;
-            goto fail_vq;
+            goto fail_log;
         }
     }
 
     hdev->started = true;
 
     return 0;
+fail_log:
 fail_vq:
     while (--i >= 0) {
         vhost_virtqueue_cleanup(hdev,
@@ -689,13 +681,18 @@ fail_vq:
                                 hdev->vqs + i,
                                 i);
     }
+fail_mem:
+fail_features:
+    vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+fail_notifiers:
 fail:
     return r;
 }
 
 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
-    int i;
+    int i, r;
+
     for (i = 0; i < hdev->nvqs; ++i) {
         vhost_virtqueue_cleanup(hdev,
                                 vdev,
@@ -704,6 +701,13 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
     vhost_client_sync_dirty_bitmap(&hdev->client, 0,
                                    (target_phys_addr_t)~0x0ull);
+    r = vdev->binding->set_guest_notifiers(vdev->binding_opaque, false);
+    if (r < 0) {
+        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
+        fflush(stderr);
+    }
+    assert (r >= 0);
+
     hdev->started = false;
     qemu_free(hdev->log);
     hdev->log_size = 0;
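
The reworked vhost_dev_start() above uses the kernel-style goto ladder:
each new label (fail_log, fail_mem, fail_features, fail_notifiers) marks
how far setup got, and control falls through the labels to undo the
earlier steps in reverse order. A minimal standalone sketch of the
pattern, with hypothetical step names standing in for the real vhost
calls:

    #include <stdio.h>

    /* Stand-ins for the real setup steps, e.g. set_guest_notifiers(true)
     * and VHOST_SET_MEM_TABLE; step_b() fails for illustration. */
    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; }
    static void undo_a(void) { printf("undoing step a\n"); }

    static int start(void)
    {
        int r;

        r = step_a();
        if (r < 0) {
            goto fail;       /* nothing succeeded yet, nothing to undo */
        }
        r = step_b();
        if (r < 0) {
            goto fail_b;     /* step_a succeeded, so undo it */
        }
        return 0;

    fail_b:
        undo_a();
    fail:
        return r;
    }

    int main(void)
    {
        printf("start() = %d\n", start());
        return 0;
    }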

hw/virtio-pci.c

@@ -451,6 +451,33 @@ static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
     return 0;
 }
 
+static int virtio_pci_set_guest_notifiers(void *opaque, bool assign)
+{
+    VirtIOPCIProxy *proxy = opaque;
+    VirtIODevice *vdev = proxy->vdev;
+    int r, n;
+
+    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+        if (!virtio_queue_get_num(vdev, n)) {
+            break;
+        }
+
+        r = virtio_pci_set_guest_notifier(opaque, n, assign);
+        if (r < 0) {
+            goto assign_error;
+        }
+    }
+
+    return 0;
+
+assign_error:
+    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
+    while (--n >= 0) {
+        virtio_pci_set_guest_notifier(opaque, n, !assign);
+    }
+    return r;
+}
+
 static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
 {
     VirtIOPCIProxy *proxy = opaque;
@@ -488,7 +515,7 @@ static const VirtIOBindings virtio_pci_bindings = {
     .load_queue = virtio_pci_load_queue,
     .get_features = virtio_pci_get_features,
     .set_host_notifier = virtio_pci_set_host_notifier,
-    .set_guest_notifier = virtio_pci_set_guest_notifier,
+    .set_guest_notifiers = virtio_pci_set_guest_notifiers,
 };
 
 static void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev,
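
Note the recovery convention in virtio_pci_set_guest_notifiers() above:
when assignment fails at queue n, the while (--n >= 0) loop re-invokes
the per-queue helper with !assign, so one helper serves both assignment
and rollback. Reduced to the bare idiom (set_one() is a hypothetical
helper, not a QEMU API):

    /* VQ n failed; undo VQs 0..n-1 by applying the opposite polarity. */
    while (--n >= 0) {
        set_one(opaque, n, !assign);
    }
    return r;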

hw/virtio.h

@@ -93,7 +93,7 @@ typedef struct {
     int (*load_config)(void * opaque, QEMUFile *f);
     int (*load_queue)(void * opaque, int n, QEMUFile *f);
     unsigned (*get_features)(void * opaque);
-    int (*set_guest_notifier)(void * opaque, int n, bool assigned);
+    int (*set_guest_notifiers)(void * opaque, bool assigned);
     int (*set_host_notifier)(void * opaque, int n, bool assigned);
 } VirtIOBindings;
 