util/vfio-helpers: Pass Error handle to qemu_vfio_dma_map()
Currently qemu_vfio_dma_map() displays errors on stderr. When using a management interface, this information is simply lost. Pass qemu_vfio_dma_map() an Error** handle so it can propagate the error to callers.

Reviewed-by: Fam Zheng <fam@euphon.net>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20210902070025.197072-7-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
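With this change, callers follow QEMU's usual Error propagation convention: a function that itself takes an Error **errp hands it straight to qemu_vfio_dma_map() and, on failure, only prepends context to the error it received, instead of formatting a new message from the return value. A minimal sketch of that pattern, mirroring the nvme_init_queue() hunk below (map_dma_buffer() and its arguments are illustrative, not part of this patch):

    #include "qapi/error.h"
    #include "qemu/vfio-helpers.h"

    /* Hypothetical caller: let qemu_vfio_dma_map() fill *errp, then add context. */
    static bool map_dma_buffer(QEMUVFIOState *vfio, void *buf, size_t bytes,
                               uint64_t *iova, Error **errp)
    {
        int r = qemu_vfio_dma_map(vfio, buf, bytes, false, iova, errp);
        if (r) {
            error_prepend(errp, "Cannot map DMA buffer: ");
            return false;
        }
        return true;
    }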
parent 526c37c19d
commit 521b97cd4e
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -176,12 +176,11 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
         return false;
     }
     memset(q->queue, 0, bytes);
-    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+    r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
     if (r) {
-        error_setg(errp, "Cannot map queue");
-        return false;
+        error_prepend(errp, "Cannot map queue: ");
     }
-    return true;
+    return r == 0;
 }
 
 static void nvme_free_queue_pair(NVMeQueuePair *q)
@@ -239,9 +238,9 @@ static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
     qemu_co_queue_init(&q->free_req_queue);
     q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
     r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
-                          false, &prp_list_iova);
+                          false, &prp_list_iova, errp);
     if (r) {
-        error_setg_errno(errp, -r, "Cannot map buffer for DMA");
+        error_prepend(errp, "Cannot map buffer for DMA: ");
         goto fail;
     }
     q->free_req_head = -1;
@@ -534,9 +533,9 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
         error_setg(errp, "Cannot allocate buffer for identify response");
         goto out;
     }
-    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova);
+    r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
     if (r) {
-        error_setg(errp, "Cannot map buffer for DMA");
+        error_prepend(errp, "Cannot map buffer for DMA: ");
         goto out;
     }
 
@@ -1032,7 +1031,7 @@ static coroutine_fn int nvme_cmd_map_qiov(BlockDriverState *bs, NvmeCmd *cmd,
 try_map:
         r = qemu_vfio_dma_map(s->vfio,
                               qiov->iov[i].iov_base,
-                              len, true, &iova);
+                              len, true, &iova, NULL);
         if (r == -ENOSPC) {
             /*
              * In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
@@ -1524,14 +1523,15 @@ static void nvme_aio_unplug(BlockDriverState *bs)
 static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
 {
     int ret;
+    Error *local_err = NULL;
     BDRVNVMeState *s = bs->opaque;
 
-    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
+    ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, &local_err);
     if (ret) {
         /* FIXME: we may run out of IOVA addresses after repeated
          * bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
          * doesn't reclaim addresses for fixed mappings. */
-        error_report("nvme_register_buf failed: %s", strerror(-ret));
+        error_reportf_err(local_err, "nvme_register_buf failed: ");
     }
 }
 
--- a/include/qemu/vfio-helpers.h
+++ b/include/qemu/vfio-helpers.h
@@ -18,7 +18,7 @@ typedef struct QEMUVFIOState QEMUVFIOState;
 QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
 void qemu_vfio_close(QEMUVFIOState *s);
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
-                      bool temporary, uint64_t *iova_list);
+                      bool temporary, uint64_t *iova_list, Error **errp);
 int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
 void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
 void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,
--- a/util/vfio-helpers.c
+++ b/util/vfio-helpers.c
@@ -463,13 +463,15 @@ static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host,
                                       size_t size, size_t max_size)
 {
     QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
+    Error *local_err = NULL;
     int ret;
 
     trace_qemu_vfio_ram_block_added(s, host, max_size);
-    ret = qemu_vfio_dma_map(s, host, max_size, false, NULL);
+    ret = qemu_vfio_dma_map(s, host, max_size, false, NULL, &local_err);
     if (ret) {
-        error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size,
-                     strerror(-ret));
+        error_reportf_err(local_err,
+                          "qemu_vfio_dma_map(%p, %zu) failed: ",
+                          host, max_size);
     }
 }
 
@@ -725,7 +727,7 @@ qemu_vfio_find_temp_iova(QEMUVFIOState *s, size_t size, uint64_t *iova)
  * mapping status within this area is not allowed).
  */
 int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
-                      bool temporary, uint64_t *iova)
+                      bool temporary, uint64_t *iova, Error **errp)
 {
     int ret = 0;
     int index;
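Callers that do not have an Error ** of their own, such as nvme_register_buf() and qemu_vfio_ram_block_added() above, instead collect the error in a local variable and report it on the spot; error_reportf_err() prints the prefixed message and frees the error. A minimal sketch of that local-reporting pattern (register_host_buffer() is an illustrative name, not part of this patch):

    /* Hypothetical caller with no errp of its own: report the error locally. */
    static void register_host_buffer(QEMUVFIOState *vfio, void *host, size_t size)
    {
        Error *local_err = NULL;

        if (qemu_vfio_dma_map(vfio, host, size, false, NULL, &local_err)) {
            /* Prints the prefix plus the detailed reason, then frees local_err. */
            error_reportf_err(local_err, "Cannot register host buffer: ");
        }
    }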