hw/block/nvme: refactor nvme_dma

The nvme_dma function doesn't just do DMA (QEMUSGList-based) memory transfers;
it also handles QEMUIOVector copies.

Introduce the NvmeTxDirection enum and rename the function to nvme_tx. Remove
the mapping of PRPs/SGLs from nvme_tx and instead assert that they have been
mapped previously. This allows more fine-grained use in subsequent patches.

Add new (better named) helpers, nvme_{c2h,h2c}, that do both PRP/SGL
mapping and transfer.
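
The split allows a caller to map the data pointer once and then do the
transfer separately. A minimal sketch of that pattern, using only the
functions introduced here (ptr/len stand in for whatever buffer a real
caller transfers; the fine-grained users only show up in subsequent
patches):

    uint16_t status;

    /* map the PRPs/SGLs referenced by the command once, up front */
    status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
    if (status) {
        return status;
    }

    /*
     * transfer against the already mapped NvmeSg; nvme_tx() now asserts
     * NVME_SG_ALLOC instead of doing the mapping itself
     */
    status = nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE);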

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
commit 81d07f4ff5
parent 073d12d998
Author: Klaus Jensen
Date:   2020-12-15 19:18:25 +01:00

1 file changed, 76 insertions(+), 62 deletions(-)

@@ -871,45 +871,71 @@ static uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
     }
 }
 
-static uint16_t nvme_dma(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
-                         DMADirection dir, NvmeRequest *req)
+typedef enum NvmeTxDirection {
+    NVME_TX_DIRECTION_TO_DEVICE   = 0,
+    NVME_TX_DIRECTION_FROM_DEVICE = 1,
+} NvmeTxDirection;
+
+static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len,
+                        NvmeTxDirection dir)
 {
-    uint16_t status = NVME_SUCCESS;
+    assert(sg->flags & NVME_SG_ALLOC);
+
+    if (sg->flags & NVME_SG_DMA) {
+        uint64_t residual;
+
+        if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+            residual = dma_buf_write(ptr, len, &sg->qsg);
+        } else {
+            residual = dma_buf_read(ptr, len, &sg->qsg);
+        }
+
+        if (unlikely(residual)) {
+            trace_pci_nvme_err_invalid_dma();
+            return NVME_INVALID_FIELD | NVME_DNR;
+        }
+    } else {
+        size_t bytes;
+
+        if (dir == NVME_TX_DIRECTION_TO_DEVICE) {
+            bytes = qemu_iovec_to_buf(&sg->iov, 0, ptr, len);
+        } else {
+            bytes = qemu_iovec_from_buf(&sg->iov, 0, ptr, len);
+        }
+
+        if (unlikely(bytes != len)) {
+            trace_pci_nvme_err_invalid_dma();
+            return NVME_INVALID_FIELD | NVME_DNR;
+        }
+    }
+
+    return NVME_SUCCESS;
+}
+
+static inline uint16_t nvme_c2h(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                                NvmeRequest *req)
+{
+    uint16_t status;
 
     status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
     if (status) {
         return status;
     }
 
-    if (req->sg.flags & NVME_SG_DMA) {
-        uint64_t residual;
+    return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_FROM_DEVICE);
+}
 
-        if (dir == DMA_DIRECTION_TO_DEVICE) {
-            residual = dma_buf_write(ptr, len, &req->sg.qsg);
-        } else {
-            residual = dma_buf_read(ptr, len, &req->sg.qsg);
-        }
+static inline uint16_t nvme_h2c(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
+                                NvmeRequest *req)
+{
+    uint16_t status;
 
-        if (unlikely(residual)) {
-            trace_pci_nvme_err_invalid_dma();
-            status = NVME_INVALID_FIELD | NVME_DNR;
-        }
-    } else {
-        size_t bytes;
-
-        if (dir == DMA_DIRECTION_TO_DEVICE) {
-            bytes = qemu_iovec_to_buf(&req->sg.iov, 0, ptr, len);
-        } else {
-            bytes = qemu_iovec_from_buf(&req->sg.iov, 0, ptr, len);
-        }
-
-        if (unlikely(bytes != len)) {
-            trace_pci_nvme_err_invalid_dma();
-            status = NVME_INVALID_FIELD | NVME_DNR;
-        }
+    status = nvme_map_dptr(n, &req->sg, len, &req->cmd);
+    if (status) {
+        return status;
     }
 
-    return status;
+    return nvme_tx(n, &req->sg, ptr, len, NVME_TX_DIRECTION_TO_DEVICE);
 }
 
 static inline void nvme_blk_read(BlockBackend *blk, int64_t offset,
@@ -1746,8 +1772,7 @@ static void nvme_compare_cb(void *opaque, int ret)
 
     buf = g_malloc(ctx->iov.size);
 
-    status = nvme_dma(nvme_ctrl(req), buf, ctx->iov.size,
-                      DMA_DIRECTION_TO_DEVICE, req);
+    status = nvme_h2c(nvme_ctrl(req), buf, ctx->iov.size, req);
     if (status) {
         req->status = status;
         goto out;
@@ -1783,8 +1808,7 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
         NvmeDsmRange range[nr];
         uintptr_t *discards = (uintptr_t *)&req->opaque;
 
-        status = nvme_dma(n, (uint8_t *)range, sizeof(range),
-                          DMA_DIRECTION_TO_DEVICE, req);
+        status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
         if (status) {
             return status;
         }
@@ -1870,8 +1894,8 @@ static uint16_t nvme_copy(NvmeCtrl *n, NvmeRequest *req)
 
     range = g_new(NvmeCopySourceRange, nr);
 
-    status = nvme_dma(n, (uint8_t *)range, nr * sizeof(NvmeCopySourceRange),
-                      DMA_DIRECTION_TO_DEVICE, req);
+    status = nvme_h2c(n, (uint8_t *)range, nr * sizeof(NvmeCopySourceRange),
+                      req);
     if (status) {
         return status;
     }
@@ -2522,8 +2546,7 @@ static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
             return NVME_INVALID_FIELD | NVME_DNR;
         }
         zd_ext = nvme_get_zd_extension(ns, zone_idx);
-        status = nvme_dma(n, zd_ext, ns->params.zd_extension_size,
-                          DMA_DIRECTION_TO_DEVICE, req);
+        status = nvme_h2c(n, zd_ext, ns->params.zd_extension_size, req);
         if (status) {
             trace_pci_nvme_err_zd_extension_map_error(zone_idx);
             return status;
@@ -2677,8 +2700,7 @@ static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
         }
     }
 
-    status = nvme_dma(n, (uint8_t *)buf, data_size,
-                      DMA_DIRECTION_FROM_DEVICE, req);
+    status = nvme_c2h(n, (uint8_t *)buf, data_size, req);
 
     g_free(buf);
@@ -2944,8 +2966,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
         nvme_clear_events(n, NVME_AER_TYPE_SMART);
     }
 
-    return nvme_dma(n, (uint8_t *) &smart + off, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *) &smart + off, trans_len, req);
 }
 
 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
@@ -2963,8 +2984,7 @@ static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
     strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
 
     trans_len = MIN(sizeof(fw_log) - off, buf_len);
 
-    return nvme_dma(n, (uint8_t *) &fw_log + off, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *) &fw_log + off, trans_len, req);
 }
 
 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
@@ -2984,8 +3004,7 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
 
     memset(&errlog, 0x0, sizeof(errlog));
     trans_len = MIN(sizeof(errlog) - off, buf_len);
 
-    return nvme_dma(n, (uint8_t *)&errlog, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)&errlog, trans_len, req);
 }
 
 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
@@ -3025,8 +3044,7 @@ static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
 
     trans_len = MIN(sizeof(log) - off, buf_len);
 
-    return nvme_dma(n, ((uint8_t *)&log) + off, trans_len,
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
 }
 
 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
@@ -3194,7 +3212,7 @@ static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
 {
     uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
 
-    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, id, sizeof(id), req);
 }
 
 static inline bool nvme_csi_has_nvm_support(NvmeNamespace *ns)
@@ -3211,8 +3229,7 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
 {
     trace_pci_nvme_identify_ctrl();
 
-    return nvme_dma(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), req);
 }
 
 static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
@@ -3235,7 +3252,7 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
-    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, id, sizeof(id), req);
 }
 
 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
@@ -3256,8 +3273,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
     }
 
     if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
-        return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
-                        DMA_DIRECTION_FROM_DEVICE, req);
+        return nvme_c2h(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs), req);
     }
 
     return NVME_INVALID_CMD_SET | NVME_DNR;
@@ -3283,8 +3299,8 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
 
     if (c->csi == NVME_CSI_NVM && nvme_csi_has_nvm_support(ns)) {
         return nvme_rpt_empty_id_struct(n, req);
     } else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
-        return nvme_dma(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
-                        DMA_DIRECTION_FROM_DEVICE, req);
+        return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
+                        req);
     }
 
     return NVME_INVALID_FIELD | NVME_DNR;
@@ -3326,7 +3342,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
         }
     }
 
-    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, list, data_len, req);
 }
 
 static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
@@ -3366,7 +3382,7 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
         }
     }
 
-    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, list, data_len, req);
 }
 
 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
@@ -3413,7 +3429,7 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
     ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
     ns_descrs->csi.v = ns->csi;
 
-    return nvme_dma(n, list, sizeof(list), DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, list, sizeof(list), req);
 }
 
 static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
@@ -3426,7 +3442,7 @@ static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
     NVME_SET_CSI(*list, NVME_CSI_NVM);
     NVME_SET_CSI(*list, NVME_CSI_ZONED);
 
-    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, list, data_len, req);
 }
 
 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
@@ -3518,8 +3534,7 @@ static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
 {
     uint64_t timestamp = nvme_get_timestamp(n);
 
-    return nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
-                    DMA_DIRECTION_FROM_DEVICE, req);
+    return nvme_c2h(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
 }
 
 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
@@ -3680,8 +3695,7 @@ static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
     uint16_t ret;
     uint64_t timestamp;
 
-    ret = nvme_dma(n, (uint8_t *)&timestamp, sizeof(timestamp),
-                   DMA_DIRECTION_TO_DEVICE, req);
+    ret = nvme_h2c(n, (uint8_t *)&timestamp, sizeof(timestamp), req);
     if (ret) {
         return ret;
     }