virtio: add MemoryListener to cache ring translations

The cached translations are RCU-protected to allow efficient use
when processing virtqueues.
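
For illustration only (not part of this patch): a virtqueue fast-path reader is
expected to pick up the cache roughly as sketched below. The sketch relies on
the VRingMemoryRegionCaches type and the atomic_rcu_set()/call_rcu() publishing
done in this patch, plus the usual QEMU RCU primitives; it also assumes a
guest-endian cached load helper such as virtio_lduw_phys_cached() from
hw/virtio/virtio-access.h, and the function name vring_avail_idx_cached() is
made up for the example.

    static uint16_t vring_avail_idx_cached(VirtQueue *vq)
    {
        /* The cache pointer is published with atomic_rcu_set() in
         * virtio_init_region_cache() and freed via call_rcu(), so it must
         * only be fetched with atomic_rcu_read() and dereferenced inside
         * an RCU read-side critical section.
         */
        VRingMemoryRegionCaches *caches;
        uint16_t idx;

        rcu_read_lock();
        caches = atomic_rcu_read(&vq->vring.caches);
        if (!caches) {
            /* Rings are not set up yet, nothing to read. */
            rcu_read_unlock();
            return 0;
        }
        idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail,
                                      offsetof(VRingAvail, idx));
        rcu_read_unlock();
        return idx;
    }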

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Paolo Bonzini 2017-01-27 16:40:17 +01:00 committed by Michael S. Tsirkin
parent 5eba0404b9
commit c611c76417
2 changed files with 103 additions and 3 deletions

hw/virtio/virtio.c

@@ -60,6 +60,13 @@ typedef struct VRingUsed
     VRingUsedElem ring[0];
 } VRingUsed;
 
+typedef struct VRingMemoryRegionCaches {
+    struct rcu_head rcu;
+    MemoryRegionCache desc;
+    MemoryRegionCache avail;
+    MemoryRegionCache used;
+} VRingMemoryRegionCaches;
+
 typedef struct VRing
 {
     unsigned int num;
@@ -68,6 +75,7 @@ typedef struct VRing
     hwaddr desc;
     hwaddr avail;
     hwaddr used;
+    VRingMemoryRegionCaches *caches;
 } VRing;
 
 struct VirtQueue
@@ -104,6 +112,51 @@ struct VirtQueue
     QLIST_ENTRY(VirtQueue) node;
 };
 
+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
+    if (!caches) {
+        return;
+    }
+
+    address_space_cache_destroy(&caches->desc);
+    address_space_cache_destroy(&caches->avail);
+    address_space_cache_destroy(&caches->used);
+    g_free(caches);
+}
+
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+    VirtQueue *vq = &vdev->vq[n];
+    VRingMemoryRegionCaches *old = vq->vring.caches;
+    VRingMemoryRegionCaches *new;
+    hwaddr addr, size;
+    int event_size;
+
+    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+    addr = vq->vring.desc;
+    if (!addr) {
+        return;
+    }
+    new = g_new0(VRingMemoryRegionCaches, 1);
+    size = virtio_queue_get_desc_size(vdev, n);
+    address_space_cache_init(&new->desc, vdev->dma_as,
+                             addr, size, false);
+
+    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    address_space_cache_init(&new->used, vdev->dma_as,
+                             vq->vring.used, size, true);
+
+    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    address_space_cache_init(&new->avail, vdev->dma_as,
+                             vq->vring.avail, size, false);
+
+    atomic_rcu_set(&vq->vring.caches, new);
+    if (old) {
+        call_rcu(old, virtio_free_region_cache, rcu);
+    }
+}
+
 /* virt queue functions */
 void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 {
@@ -117,6 +170,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
     vring->used = vring_align(vring->avail +
                               offsetof(VRingAvail, ring[vring->num]),
                               vring->align);
+    virtio_init_region_cache(vdev, n);
 }
 
 static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
@@ -1264,6 +1318,7 @@ void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
     vdev->vq[n].vring.desc = desc;
     vdev->vq[n].vring.avail = avail;
     vdev->vq[n].vring.used = used;
+    virtio_init_region_cache(vdev, n);
 }
 
 void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
@@ -1984,9 +2039,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
 void virtio_cleanup(VirtIODevice *vdev)
 {
     qemu_del_vm_change_state_handler(vdev->vmstate);
-    g_free(vdev->config);
-    g_free(vdev->vq);
-    g_free(vdev->vector_queues);
 }
 
 static void virtio_vmstate_change(void *opaque, int running, RunState state)
@@ -2248,6 +2300,19 @@ void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
     }
 }
 
+static void virtio_memory_listener_commit(MemoryListener *listener)
+{
+    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
+    int i;
+
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        if (vdev->vq[i].vring.num == 0) {
+            break;
+        }
+        virtio_init_region_cache(vdev, i);
+    }
+}
+
 static void virtio_device_realize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
@@ -2270,6 +2335,9 @@ static void virtio_device_realize(DeviceState *dev, Error **errp)
         error_propagate(errp, err);
         return;
     }
+
+    vdev->listener.commit = virtio_memory_listener_commit;
+    memory_listener_register(&vdev->listener, vdev->dma_as);
 }
 
 static void virtio_device_unrealize(DeviceState *dev, Error **errp)
@@ -2292,6 +2360,36 @@ static void virtio_device_unrealize(DeviceState *dev, Error **errp)
     vdev->bus_name = NULL;
 }
 
+static void virtio_device_free_virtqueues(VirtIODevice *vdev)
+{
+    int i;
+
+    if (!vdev->vq) {
+        return;
+    }
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+        VRingMemoryRegionCaches *caches;
+        if (vdev->vq[i].vring.num == 0) {
+            break;
+        }
+        caches = atomic_read(&vdev->vq[i].vring.caches);
+        atomic_set(&vdev->vq[i].vring.caches, NULL);
+        virtio_free_region_cache(caches);
+    }
+    g_free(vdev->vq);
+}
+
+static void virtio_device_instance_finalize(Object *obj)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(obj);
+
+    memory_listener_unregister(&vdev->listener);
+    virtio_device_free_virtqueues(vdev);
+
+    g_free(vdev->config);
+    g_free(vdev->vector_queues);
+}
+
 static Property virtio_properties[] = {
     DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
     DEFINE_PROP_END_OF_LIST(),
@@ -2418,6 +2516,7 @@ static const TypeInfo virtio_device_info = {
     .parent = TYPE_DEVICE,
     .instance_size = sizeof(VirtIODevice),
     .class_init = virtio_device_class_init,
+    .instance_finalize = virtio_device_instance_finalize,
     .abstract = true,
     .class_size = sizeof(VirtioDeviceClass),
 };

include/hw/virtio/virtio.h

@@ -85,6 +85,7 @@ struct VirtIODevice
     uint32_t generation;
     int nvectors;
     VirtQueue *vq;
+    MemoryListener listener;
     uint16_t device_id;
     bool vm_running;
     bool broken; /* device in invalid state, needs reset */