f6fe3e333f
Next patches will register the vhost_vdpa memory listener while the VM is migrating at the destination, so we can map the memory to the device before stopping the VM at the source. The main goal is to reduce the downtime. However, the destination QEMU is unaware of which vhost_vdpa device will register its memory_listener. If the source guest has CVQ enabled, it will be the CVQ device. Otherwise, it will be the first one. Move the memory listener to a common place rather than always in the first / last vhost_vdpa. Signed-off-by: Eugenio Pérez <eperezma@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Message-Id: <20231221174322.3130442-14-eperezma@redhat.com> Tested-by: Lei Yang <leiyang@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
87 lines
2.3 KiB
C
/*
 * vhost-vdpa.h
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
|
#ifndef HW_VIRTIO_VHOST_VDPA_H
|
|
#define HW_VIRTIO_VHOST_VDPA_H
|
|
|
|
#include <gmodule.h>
|
|
|
|
#include "hw/virtio/vhost-iova-tree.h"
|
|
#include "hw/virtio/vhost-shadow-virtqueue.h"
|
|
#include "hw/virtio/virtio.h"
|
|
#include "standard-headers/linux/vhost_types.h"
|
|
|
|
/*
|
|
* ASID dedicated to map guest's addresses. If SVQ is disabled it maps GPA to
|
|
* qemu's IOVA. If SVQ is enabled it maps also the SVQ vring here
|
|
*/
|
|
#define VHOST_VDPA_GUEST_PA_ASID 0
|
|
|
|
typedef struct VhostVDPAHostNotifier {
|
|
MemoryRegion mr;
|
|
void *addr;
|
|
} VhostVDPAHostNotifier;
|
|
|
|
/* Info shared by all vhost_vdpa device models */
|
|
typedef struct vhost_vdpa_shared {
|
|
int device_fd;
|
|
MemoryListener listener;
|
|
struct vhost_vdpa_iova_range iova_range;
|
|
QLIST_HEAD(, vdpa_iommu) iommu_list;
|
|
|
|
/* IOVA mapping used by the Shadow Virtqueue */
|
|
VhostIOVATree *iova_tree;
|
|
|
|
/* Copy of backend features */
|
|
uint64_t backend_cap;
|
|
|
|
bool iotlb_batch_begin_sent;
|
|
|
|
/* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
|
|
bool shadow_data;
|
|
} VhostVDPAShared;
|
|
|
|
typedef struct vhost_vdpa {
|
|
int index;
|
|
uint32_t address_space_id;
|
|
uint64_t acked_features;
|
|
bool shadow_vqs_enabled;
|
|
/* Device suspended successfully */
|
|
bool suspended;
|
|
VhostVDPAShared *shared;
|
|
GPtrArray *shadow_vqs;
|
|
const VhostShadowVirtqueueOps *shadow_vq_ops;
|
|
void *shadow_vq_ops_opaque;
|
|
struct vhost_dev *dev;
|
|
Error *migration_blocker;
|
|
VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
|
|
IOMMUNotifier n;
|
|
} VhostVDPA;
|
|
|
|
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
|
|
int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
|
|
|
|
int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
|
|
hwaddr size, void *vaddr, bool readonly);
|
|
int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
|
|
hwaddr size);
|
|
|
|
typedef struct vdpa_iommu {
|
|
VhostVDPAShared *dev_shared;
|
|
IOMMUMemoryRegion *iommu_mr;
|
|
hwaddr iommu_offset;
|
|
IOMMUNotifier n;
|
|
QLIST_ENTRY(vdpa_iommu) iommu_next;
|
|
} VDPAIOMMUState;
|
|
|
|
|
|
#endif
|