virtio-iommu: Implement RESV_MEM probe request

This patch implements the PROBE request. At the moment,
only the RESV_MEM property is handled. The first goal is
to report IOMMU-wide reserved regions such as the MSI regions
set by the machine code. On x86 this is the IOAPIC MSI
region [0xFEE00000 - 0xFEEFFFFF]; on ARM it may be the ITS
doorbell.
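
As an illustration (not part of this patch), a minimal sketch of how
machine code could describe the x86 MSI window using the ReservedRegion
fields consumed by the new PROBE code. The helper name is hypothetical
and the direct assignment is an assumption; in practice the array is
expected to be handed to the device by the machine or bus code.

/*
 * Sketch only: describe the IOAPIC MSI window as an IOMMU-wide
 * reserved region so the device can report it in PROBE replies.
 */
#include "qemu/osdep.h"
#include "hw/virtio/virtio-iommu.h"
#include "standard-headers/linux/virtio_iommu.h"

static void example_set_x86_msi_resv_region(VirtIOIOMMU *s)
{
    ReservedRegion *regions = g_new0(ReservedRegion, 1);

    regions[0].low  = 0xFEE00000;                  /* IOAPIC MSI window */
    regions[0].high = 0xFEEFFFFF;
    regions[0].type = VIRTIO_IOMMU_RESV_MEM_T_MSI;

    /* Assumption: set before realize so PROBE can report the region. */
    s->reserved_regions = regions;
    s->nb_reserved_regions = 1;
}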

In the future we may introduce per-device reserved regions.
This will be useful when protecting host-assigned devices,
which may expose their own reserved regions.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Message-id: 20200629070404.10969-3-eric.auger@redhat.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Eric Auger 2020-07-03 16:59:42 +01:00 committed by Peter Maydell
parent f78069253c
commit 1733eebb9e
3 changed files with 93 additions and 4 deletions

hw/virtio/trace-events

@@ -74,3 +74,4 @@ virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end) "dev= %d, type=%d start=0x%"PRIx64" end=0x%"PRIx64

hw/virtio/virtio-iommu.c

@@ -38,6 +38,7 @@
/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
#define VIOMMU_PROBE_SIZE 512
typedef struct VirtIOIOMMUDomain {
uint32_t id;
@@ -378,6 +379,65 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s,
return ret;
}
static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
uint8_t *buf, size_t free)
{
struct virtio_iommu_probe_resv_mem prop = {};
size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
int i;
total = size * s->nb_reserved_regions;
if (total > free) {
return -ENOSPC;
}
for (i = 0; i < s->nb_reserved_regions; i++) {
unsigned subtype = s->reserved_regions[i].type;
assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
prop.head.length = cpu_to_le16(length);
prop.subtype = subtype;
prop.start = cpu_to_le64(s->reserved_regions[i].low);
prop.end = cpu_to_le64(s->reserved_regions[i].high);
memcpy(buf, &prop, size);
trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
prop.start, prop.end);
buf += size;
}
return total;
}
/**
* virtio_iommu_probe - Fill the probe request buffer with
* the properties the device is able to return
*/
static int virtio_iommu_probe(VirtIOIOMMU *s,
struct virtio_iommu_req_probe *req,
uint8_t *buf)
{
uint32_t ep_id = le32_to_cpu(req->endpoint);
size_t free = VIOMMU_PROBE_SIZE;
ssize_t count;
if (!virtio_iommu_mr(s, ep_id)) {
return VIRTIO_IOMMU_S_NOENT;
}
count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
if (count < 0) {
return VIRTIO_IOMMU_S_INVAL;
}
buf += count;
free -= count;
return VIRTIO_IOMMU_S_OK;
}
static int virtio_iommu_iov_to_req(struct iovec *iov,
unsigned int iov_cnt,
void *req, size_t req_sz)
@@ -407,15 +467,27 @@ virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)
static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
struct iovec *iov,
unsigned int iov_cnt,
uint8_t *buf)
{
struct virtio_iommu_req_probe req;
int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));
return ret ? ret : virtio_iommu_probe(s, &req, buf);
}
static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
struct virtio_iommu_req_head head;
struct virtio_iommu_req_tail tail = {};
+size_t output_size = sizeof(tail), sz;
VirtQueueElement *elem;
unsigned int iov_cnt;
struct iovec *iov;
-size_t sz;
+void *buf = NULL;
for (;;) {
elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
@@ -452,6 +524,17 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
case VIRTIO_IOMMU_T_UNMAP:
tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
break;
case VIRTIO_IOMMU_T_PROBE:
{
struct virtio_iommu_req_tail *ptail;
output_size = s->config.probe_size + sizeof(tail);
buf = g_malloc0(output_size);
ptail = (struct virtio_iommu_req_tail *)
(buf + s->config.probe_size);
ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
}
default:
tail.status = VIRTIO_IOMMU_S_UNSUPP;
}
@@ -459,12 +542,13 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
out:
sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
-&tail, sizeof(tail));
-assert(sz == sizeof(tail));
+buf ? buf : &tail, output_size);
+assert(sz == output_size);
-virtqueue_push(vq, elem, sizeof(tail));
+virtqueue_push(vq, elem, sz);
virtio_notify(vdev, vq);
g_free(elem);
g_free(buf);
}
}
@@ -667,6 +751,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
s->config.page_size_mask = TARGET_PAGE_MASK;
s->config.input_range.end = -1UL;
s->config.domain_range.end = 32;
s->config.probe_size = VIOMMU_PROBE_SIZE;
virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
@@ -676,6 +761,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
qemu_mutex_init(&s->mutex);
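
For a rough sense of capacity: assuming the usual
virtio_iommu_probe_resv_mem wire layout (a 4-byte property header, a
1-byte subtype plus 3 reserved bytes, and two 64-bit addresses), each
RESV_MEM property occupies 24 bytes, so the fixed 512-byte probe buffer
advertised through config.probe_size can carry up to 21 reserved
regions before virtio_iommu_fill_resv_mem_prop() returns -ENOSPC. A
standalone sketch of that arithmetic, with a local mirror of the
assumed layout:

/* Sketch: capacity of the 512-byte probe buffer for RESV_MEM properties. */
#include <stdint.h>
#include <stdio.h>

#define VIOMMU_PROBE_SIZE 512              /* matches the patch above */

struct resv_mem_prop {                     /* assumed wire layout */
    uint16_t type;                         /* property header: type */
    uint16_t length;                       /* property header: length */
    uint8_t  subtype;
    uint8_t  reserved[3];
    uint64_t start;
    uint64_t end;
};

int main(void)
{
    size_t per_prop = sizeof(struct resv_mem_prop);    /* 24 bytes */

    printf("%zu bytes per property, up to %zu regions per PROBE reply\n",
           per_prop, (size_t)VIOMMU_PROBE_SIZE / per_prop);
    return 0;
}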

include/hw/virtio/virtio-iommu.h

@@ -53,6 +53,8 @@ typedef struct VirtIOIOMMU {
GHashTable *as_by_busptr;
IOMMUPciBus *iommu_pcibus_by_bus_num[PCI_BUS_MAX];
PCIBus *primary_bus;
ReservedRegion *reserved_regions;
uint32_t nb_reserved_regions;
GTree *domains;
QemuMutex mutex;
GTree *endpoints;
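
For completeness, an illustrative driver-side sketch (not the Linux
driver) of how the property stream built by
virtio_iommu_fill_resv_mem_prop() could be walked. Properties are
packed back to back, head.length counts the bytes that follow the
4-byte header, and the zero-filled remainder of the g_malloc0'ed probe
buffer terminates the list. Values are little-endian on the wire; the
sketch assumes a little-endian host, and the property type values are
assumptions taken from the standard virtio-iommu header.

/*
 * Sketch: walk the PROBE reply property list written by the device.
 */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#define PROBE_T_NONE      0    /* zero-filled tail of the buffer */
#define PROBE_T_RESV_MEM  1    /* VIRTIO_IOMMU_PROBE_T_RESV_MEM (assumed) */

struct prop_head {
    uint16_t type;
    uint16_t length;           /* payload bytes after this header */
};

struct resv_mem_prop {         /* same assumed layout as the earlier sketch */
    struct prop_head head;
    uint8_t  subtype;
    uint8_t  reserved[3];
    uint64_t start;
    uint64_t end;
};

void parse_probe_properties(const uint8_t *buf, size_t probe_size)
{
    size_t off = 0;

    while (off + sizeof(struct prop_head) <= probe_size) {
        struct prop_head head;

        memcpy(&head, buf + off, sizeof(head));    /* avoid unaligned access */
        if (head.type == PROBE_T_NONE) {
            break;
        }
        if (head.type == PROBE_T_RESV_MEM &&
            off + sizeof(struct resv_mem_prop) <= probe_size) {
            struct resv_mem_prop r;

            memcpy(&r, buf + off, sizeof(r));
            printf("reserved region subtype=%u [0x%" PRIx64 " - 0x%" PRIx64 "]\n",
                   (unsigned)r.subtype, r.start, r.end);
        }
        off += sizeof(head) + head.length;         /* length excludes the header */
    }
}

With the single MSI region from the first sketch, this would print one
line for [0xFEE00000 - 0xFEEFFFFF].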