virtio-pci: decouple the single vector from the interrupt process

To reuse the interrupt setup and teardown code for the configure interrupt,
decouple the handling of a single vector from the per-queue loop. Add the new
functions kvm_virtio_pci_vector_use_one() and _release_one(), each of which
handles one vector; the existing per-device functions now simply call them in
a loop over the queue numbers.

Signed-off-by: Cindy Lu <lulu@redhat.com>
Message-Id: <20211104164827.21911-4-lulu@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
@@ -677,7 +677,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
 }
 
 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
-                                        unsigned int queue_no,
                                         unsigned int vector)
 {
     VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -740,87 +739,103 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
     return 0;
 }
 
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
 {
+    unsigned int vector;
+    int ret;
+    EventNotifier *n;
     PCIDevice *dev = &proxy->pci_dev;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-    unsigned int vector;
-    int ret, queue_no;
-    EventNotifier *n;
-    for (queue_no = 0; queue_no < nvqs; queue_no++) {
-        if (!virtio_queue_get_num(vdev, queue_no)) {
-            break;
-        }
-        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-        if (ret < 0) {
-            break;
-        }
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
-        if (ret < 0) {
-            goto undo;
-        }
-        /* If guest supports masking, set up irqfd now.
-         * Otherwise, delay until unmasked in the frontend.
-         */
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
-            if (ret < 0) {
-                kvm_virtio_pci_vq_vector_release(proxy, vector);
-                goto undo;
-            }
-        }
-    }
+
+    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+    if (ret < 0) {
+        return ret;
+    }
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return 0;
+    }
+    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+    if (ret < 0) {
+        goto undo;
+    }
+    /*
+     * If guest supports masking, set up irqfd now.
+     * Otherwise, delay until unmasked in the frontend.
+     */
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
+        if (ret < 0) {
+            kvm_virtio_pci_vq_vector_release(proxy, vector);
+            goto undo;
+        }
+    }
+
     return 0;
-
 undo:
-    while (--queue_no >= 0) {
-        vector = virtio_queue_vector(vdev, queue_no);
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-            if (ret < 0) {
-                break;
-            }
-            kvm_virtio_pci_irqfd_release(proxy, n, vector);
-        }
-        kvm_virtio_pci_vq_vector_release(proxy, vector);
-    }
+    vector = virtio_queue_vector(vdev, queue_no);
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return ret;
+    }
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+        if (ret < 0) {
+            return ret;
+        }
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    }
     return ret;
 }
 
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+    int queue_no;
+    int ret = 0;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+    for (queue_no = 0; queue_no < nvqs; queue_no++) {
+        if (!virtio_queue_get_num(vdev, queue_no)) {
+            return -1;
+        }
+        ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
+    }
+    return ret;
+}
+
+static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
+                                              int queue_no)
 {
-    PCIDevice *dev = &proxy->pci_dev;
     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
     unsigned int vector;
-    int queue_no;
-    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
     EventNotifier *n;
-    int ret ;
+    int ret;
+    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+    PCIDevice *dev = &proxy->pci_dev;
+
+    ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+    if (ret < 0) {
+        return;
+    }
+    if (vector >= msix_nr_vectors_allocated(dev)) {
+        return;
+    }
+    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+        kvm_virtio_pci_irqfd_release(proxy, n, vector);
+    }
+    kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+
+static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+{
+    int queue_no;
+    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
     for (queue_no = 0; queue_no < nvqs; queue_no++) {
         if (!virtio_queue_get_num(vdev, queue_no)) {
             break;
         }
-        ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-        if (ret < 0) {
-            break;
-        }
-        if (vector >= msix_nr_vectors_allocated(dev)) {
-            continue;
-        }
-        /* If guest supports masking, clean up irqfd now.
-         * Otherwise, it was cleaned when masked in the frontend.
-         */
-        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-            kvm_virtio_pci_irqfd_release(proxy, n, vector);
-        }
-        kvm_virtio_pci_vq_vector_release(proxy, vector);
+        kvm_virtio_pci_vector_release_one(proxy, queue_no);
     }
 }
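
With the per-vector steps factored out, a configure-interrupt path only needs to resolve
its own vector and event notifier and then run the same use/release sequence, instead of
duplicating the per-queue loop. Below is a minimal sketch of such a caller, not part of
this commit: virtio_pci_get_config_notifier() is a hypothetical lookup helper, and the
function name is an assumption about how a follow-up might wire this up.

/*
 * Sketch only: reuse the decoupled per-vector steps for the config interrupt.
 * virtio_pci_get_config_notifier() is a hypothetical helper; the actual
 * follow-up series may resolve the config notifier differently.
 */
static int kvm_virtio_pci_vector_config_use_sketch(VirtIOPCIProxy *proxy)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector = vdev->config_vector;
    EventNotifier *n;
    int ret;

    if (vector >= msix_nr_vectors_allocated(&proxy->pci_dev)) {
        return 0;
    }
    n = virtio_pci_get_config_notifier(vdev);          /* hypothetical helper */
    ret = kvm_virtio_pci_vq_vector_use(proxy, vector);  /* same per-vector step */
    if (ret < 0) {
        return ret;
    }
    /* As with queue vectors: set up the irqfd only if the guest can mask. */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            kvm_virtio_pci_vq_vector_release(proxy, vector);
        }
    }
    return ret;
}

The point of the sketch is the design choice this patch enables: once vector use/release
is per-vector rather than per-loop, any interrupt source (queue or config) can share the
same KVM irqfd bookkeeping.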