vhost: Add Shadow VirtQueue kick forwarding capabilities

In this mode SVQ performs no buffer forwarding: QEMU just forwards the
guest's kicks to the device.

Host memory notifier regions are left out for simplicity; they will not
be addressed in this series.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Commit: dff4426fa6 (parent 10857ec0ad)
Author: Eugenio Pérez, 2022-03-14 18:34:42 +01:00; committed by Jason Wang
4 changed files with 215 additions and 2 deletions
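
Before the per-file diffs, a note on the mechanism: the "kick forwarding"
described above is plain eventfd plumbing. A minimal sketch of the idea
(not part of the patch; the helper name and the raw read/write calls are
illustrative, the patch itself goes through QEMU's EventNotifier wrappers
and the main loop):

    #include <stdint.h>
    #include <unistd.h>

    /* Hypothetical helper: drain the guest's kick, re-raise it for the device */
    static void forward_kick(int guest_kick_fd, int device_kick_fd)
    {
        uint64_t counter;

        /* Consume (clear) the guest's pending kick... */
        if (read(guest_kick_fd, &counter, sizeof(counter)) == sizeof(counter)) {
            uint64_t one = 1;

            /* ...and notify the device by writing to its kick eventfd. */
            if (write(device_kick_fd, &one, sizeof(one)) != sizeof(one)) {
                /* nothing sensible to do in a sketch */
            }
        }
    }

This is what vhost_handle_guest_kick() below does with
event_notifier_test_and_clear() and event_notifier_set().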

hw/virtio/vhost-shadow-virtqueue.c

@@ -11,6 +11,59 @@
 #include "hw/virtio/vhost-shadow-virtqueue.h"
 #include "qemu/error-report.h"
+#include "qemu/main-loop.h"
+#include "linux-headers/linux/vhost.h"
+
+/**
+ * Forward guest notifications.
+ *
+ * @n: guest kick event notifier, the one that the guest set to notify the SVQ.
+ */
+static void vhost_handle_guest_kick(EventNotifier *n)
+{
+    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
+    event_notifier_test_and_clear(n);
+    event_notifier_set(&svq->hdev_kick);
+}
+
+/**
+ * Set a new file descriptor for the guest to kick the SVQ and notify for avail
+ *
+ * @svq: The svq
+ * @svq_kick_fd: The svq kick fd
+ *
+ * Note that the SVQ will never close the old file descriptor.
+ */
+void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
+{
+    EventNotifier *svq_kick = &svq->svq_kick;
+    bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
+    bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;
+
+    if (poll_stop) {
+        event_notifier_set_handler(svq_kick, NULL);
+    }
+
+    /*
+     * event_notifier_set_handler already checks for guest notifications if
+     * they arrive at the new file descriptor during the switch, so there is
+     * no need to check for them explicitly.
+     */
+    if (poll_start) {
+        event_notifier_init_fd(svq_kick, svq_kick_fd);
+        event_notifier_set(svq_kick);
+        event_notifier_set_handler(svq_kick, vhost_handle_guest_kick);
+    }
+}
+
+/**
+ * Stop the shadow virtqueue operation.
+ * @svq: Shadow Virtqueue
+ */
+void vhost_svq_stop(VhostShadowVirtqueue *svq)
+{
+    event_notifier_set_handler(&svq->svq_kick, NULL);
+}
+
 /**
  * Creates vhost shadow virtqueue, and instructs the vhost device to use the
@@ -39,6 +92,7 @@ VhostShadowVirtqueue *vhost_svq_new(void)
         goto err_init_hdev_call;
     }
 
+    event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND);
     return g_steal_pointer(&svq);
 
 err_init_hdev_call:
@@ -56,6 +110,7 @@ err_init_hdev_kick:
 void vhost_svq_free(gpointer pvq)
 {
     VhostShadowVirtqueue *vq = pvq;
+    vhost_svq_stop(vq);
     event_notifier_cleanup(&vq->hdev_kick);
     event_notifier_cleanup(&vq->hdev_call);
     g_free(vq);

hw/virtio/vhost-shadow-virtqueue.h

@@ -18,8 +18,22 @@ typedef struct VhostShadowVirtqueue {
     EventNotifier hdev_kick;
     /* Shadow call notifier, sent to vhost */
     EventNotifier hdev_call;
+
+    /*
+     * Borrowed virtqueue's guest-to-host notifier. Borrowing it into this
+     * event notifier lets us recover the VhostShadowVirtqueue from the
+     * event loop easily; if we used the VirtQueue's own notifier, we would
+     * have no easy way to retrieve the VhostShadowVirtqueue.
+     *
+     * The shadow virtqueue must therefore not clean it up, or we would
+     * lose the VirtQueue's notifier.
+     */
+    EventNotifier svq_kick;
 } VhostShadowVirtqueue;
 
+void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
+
+void vhost_svq_stop(VhostShadowVirtqueue *svq);
+
 VhostShadowVirtqueue *vhost_svq_new(void);
 
 void vhost_svq_free(gpointer vq);
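
Taken together, the header now exposes a minimal kick-forwarding lifecycle.
A hedged usage sketch of that API (the caller and the guest_kick_fd
parameter are assumptions for illustration, not part of the patch):

    #include "hw/virtio/vhost-shadow-virtqueue.h"

    /* Assumed caller: guest_kick_fd is the guest's kick eventfd for this vq */
    static void example_svq_lifecycle(int guest_kick_fd)
    {
        VhostShadowVirtqueue *svq = vhost_svq_new();

        if (!svq) {
            return;
        }

        /* Start polling guest_kick_fd, forwarding kicks to svq->hdev_kick */
        vhost_svq_set_svq_kick_fd(svq, guest_kick_fd);

        /* ... queue runs ... */

        vhost_svq_stop(svq);   /* stop polling the guest notifier */
        vhost_svq_free(svq);   /* also stops, then cleans up hdev_kick/hdev_call */
    }

vhost-vdpa below follows the same pattern: vhost_svq_new() at init time,
vhost_svq_set_svq_kick_fd() when the kick fd is handed over, vhost_svq_stop()
on reset/cleanup, and vhost_svq_free() as the GPtrArray element destructor.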

hw/virtio/vhost-vdpa.c

@@ -17,12 +17,14 @@
 #include "hw/virtio/vhost.h"
 #include "hw/virtio/vhost-backend.h"
 #include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-shadow-virtqueue.h"
 #include "hw/virtio/vhost-vdpa.h"
 #include "exec/address-spaces.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
 #include "trace.h"
 #include "qemu-common.h"
+#include "qapi/error.h"
 
 /*
  * Return one past the end of the section. Be careful with uint64_t
@@ -342,6 +344,30 @@ static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
     return v->index != 0;
 }
 
+static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
+                               Error **errp)
+{
+    g_autoptr(GPtrArray) shadow_vqs = NULL;
+
+    if (!v->shadow_vqs_enabled) {
+        return 0;
+    }
+
+    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
+    for (unsigned n = 0; n < hdev->nvqs; ++n) {
+        g_autoptr(VhostShadowVirtqueue) svq = vhost_svq_new();
+
+        if (unlikely(!svq)) {
+            error_setg(errp, "Cannot create svq %u", n);
+            return -1;
+        }
+        g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
+    }
+
+    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
+    return 0;
+}
+
 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
 {
     struct vhost_vdpa *v;
@@ -364,6 +390,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
     dev->opaque = opaque;
     v->listener = vhost_vdpa_memory_listener;
     v->msg_type = VHOST_IOTLB_MSG_V2;
+    ret = vhost_vdpa_init_svq(dev, v, errp);
+    if (ret) {
+        goto err;
+    }
 
     vhost_vdpa_get_iova_range(v);
@@ -375,6 +405,10 @@
                                VIRTIO_CONFIG_S_DRIVER);
 
     return 0;
+
+err:
+    ram_block_discard_disable(false);
+    return ret;
 }
 
 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
@@ -445,8 +479,14 @@ static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
 
 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
 {
+    struct vhost_vdpa *v = dev->opaque;
     int i;
 
+    if (v->shadow_vqs_enabled) {
+        /* FIXME SVQ is not compatible with host notifiers mr */
+        return;
+    }
+
     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
         if (vhost_vdpa_host_notifier_init(dev, i)) {
             goto err;
@@ -460,6 +500,21 @@ err:
     return;
 }
 
+static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    size_t idx;
+
+    if (!v->shadow_vqs) {
+        return;
+    }
+
+    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
+        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
+    }
+    g_ptr_array_free(v->shadow_vqs, true);
+}
+
 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
 {
     struct vhost_vdpa *v;
@@ -468,6 +523,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
     trace_vhost_vdpa_cleanup(dev, v);
     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
     memory_listener_unregister(&v->listener);
+    vhost_vdpa_svq_cleanup(dev);
 
     dev->opaque = NULL;
     ram_block_discard_disable(false);
@@ -559,11 +615,26 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
     return ret;
 }
 
+static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
+{
+    if (!v->shadow_vqs_enabled) {
+        return;
+    }
+
+    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+
+        vhost_svq_stop(svq);
+    }
+}
+
 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
 {
+    struct vhost_vdpa *v = dev->opaque;
     int ret;
     uint8_t status = 0;
 
+    vhost_vdpa_reset_svq(v);
     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
     trace_vhost_vdpa_reset_device(dev, status);
     return ret;
@@ -647,13 +718,74 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
     return ret;
 }
 
+static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
+                                         struct vhost_vring_file *file)
+{
+    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+}
+
+/**
+ * Set the shadow virtqueue descriptors to the device
+ *
+ * @dev: The vhost device model
+ * @svq: The shadow virtqueue
+ * @idx: The index of the virtqueue in the vhost device
+ * @errp: Error
+ */
+static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
+                                 VhostShadowVirtqueue *svq, unsigned idx,
+                                 Error **errp)
+{
+    struct vhost_vring_file file = {
+        .index = dev->vq_index + idx,
+    };
+    const EventNotifier *event_notifier = &svq->hdev_kick;
+    int r;
+
+    file.fd = event_notifier_get_fd(event_notifier);
+    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
+    if (unlikely(r != 0)) {
+        error_setg_errno(errp, -r, "Can't set device kick fd");
+    }
+
+    return r == 0;
+}
+
+static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    Error *err = NULL;
+    unsigned i;
+
+    if (!v->shadow_vqs) {
+        return true;
+    }
+
+    for (i = 0; i < v->shadow_vqs->len; ++i) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
+        if (unlikely(!ok)) {
+            error_reportf_err(err, "Cannot setup SVQ %u: ", i);
+            return false;
+        }
+    }
+
+    return true;
+}
+
 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
 {
     struct vhost_vdpa *v = dev->opaque;
+    bool ok;
     trace_vhost_vdpa_dev_start(dev, started);
 
     if (started) {
         vhost_vdpa_host_notifiers_init(dev);
+        ok = vhost_vdpa_svqs_start(dev);
+        if (unlikely(!ok)) {
+            return -1;
+        }
         vhost_vdpa_set_vring_ready(dev);
     } else {
         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
@@ -725,8 +857,16 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                      struct vhost_vring_file *file)
 {
-    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
-    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+    struct vhost_vdpa *v = dev->opaque;
+    int vdpa_idx = file->index - dev->vq_index;
+
+    if (v->shadow_vqs_enabled) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
+        vhost_svq_set_svq_kick_fd(svq, file->fd);
+        return 0;
+    } else {
+        return vhost_vdpa_set_vring_dev_kick(dev, file);
+    }
 }
 
 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,

include/hw/virtio/vhost-vdpa.h

@@ -12,6 +12,8 @@
 #ifndef HW_VIRTIO_VHOST_VDPA_H
 #define HW_VIRTIO_VHOST_VDPA_H
 
+#include <gmodule.h>
+
 #include "hw/virtio/virtio.h"
 #include "standard-headers/linux/vhost_types.h"
@@ -27,6 +29,8 @@ typedef struct vhost_vdpa {
     bool iotlb_batch_begin_sent;
     MemoryListener listener;
     struct vhost_vdpa_iova_range iova_range;
+    bool shadow_vqs_enabled;
+    GPtrArray *shadow_vqs;
     struct vhost_dev *dev;
     VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
 } VhostVDPA;
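
A note on ownership for the new shadow_vqs field: the array is created with
g_ptr_array_new_full() and vhost_svq_free as the element free function, so
destroying the array also destroys every VhostShadowVirtqueue it holds. A
self-contained sketch of that GLib pattern (FakeSvq and fake_svq_free are
made-up stand-ins, not QEMU code):

    #include <glib.h>

    typedef struct { int id; } FakeSvq;      /* stand-in element type */

    static void fake_svq_free(gpointer p)    /* stand-in element destructor */
    {
        g_free(p);
    }

    int main(void)
    {
        /* Reserve room for 2 elements, register the per-element destructor */
        GPtrArray *vqs = g_ptr_array_new_full(2, fake_svq_free);

        for (int i = 0; i < 2; i++) {
            FakeSvq *svq = g_new0(FakeSvq, 1);
            svq->id = i;
            g_ptr_array_add(vqs, svq);
        }

        /* free_segment=TRUE: the destructor also runs on each element */
        g_ptr_array_free(vqs, TRUE);
        return 0;
    }

This is why vhost_vdpa_svq_cleanup() only has to stop each queue before
calling g_ptr_array_free(v->shadow_vqs, true).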