hw/block/nvme: add support for sgl bit bucket descriptor

This adds support for SGL descriptor type 0x1 (bit bucket descriptor).
See the NVM Express v1.3d specification, Section 4.4 ("Scatter Gather
List (SGL)").

Signed-off-by: Gollu Appalanaidu <anaidu.gollu@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Authored by Gollu Appalanaidu on 2020-03-18 14:11:19 +05:30; committed by Klaus Jensen.
parent cba0a8a344
commit d97eee64fe
1 changed file with 27 additions and 6 deletions

View File

@ -430,6 +430,10 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
uint8_t type = NVME_SGL_TYPE(segment[i].type);
switch (type) {
case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
if (req->cmd.opcode == NVME_CMD_WRITE) {
continue;
}
case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
break;
case NVME_SGL_DESCR_TYPE_SEGMENT:
@ -440,6 +444,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
}
dlen = le32_to_cpu(segment[i].len);
if (!dlen) {
continue;
}
@ -460,6 +465,11 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
}
trans_len = MIN(*len, dlen);
if (type == NVME_SGL_DESCR_TYPE_BIT_BUCKET) {
goto next;
}
addr = le64_to_cpu(segment[i].addr);
if (UINT64_MAX - addr < dlen) {
@ -471,6 +481,7 @@ static uint16_t nvme_map_sgl_data(NvmeCtrl *n, QEMUSGList *qsg,
return status;
}
next:
*len -= trans_len;
}
@ -540,7 +551,8 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
seg_len = le32_to_cpu(sgld->len);
/* check the length of the (Last) Segment descriptor */
if (!seg_len || seg_len & 0xf) {
if ((!seg_len || seg_len & 0xf) &&
(NVME_SGL_TYPE(sgld->type) != NVME_SGL_DESCR_TYPE_BIT_BUCKET)) {
return NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
}
@ -577,19 +589,27 @@ static uint16_t nvme_map_sgl(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
last_sgld = &segment[nsgld - 1];
/* if the segment ends with a Data Block, then we are done */
if (NVME_SGL_TYPE(last_sgld->type) == NVME_SGL_DESCR_TYPE_DATA_BLOCK) {
/*
* If the segment ends with a Data Block or Bit Bucket Descriptor Type,
* then we are done.
*/
switch (NVME_SGL_TYPE(last_sgld->type)) {
case NVME_SGL_DESCR_TYPE_DATA_BLOCK:
case NVME_SGL_DESCR_TYPE_BIT_BUCKET:
status = nvme_map_sgl_data(n, qsg, iov, segment, nsgld, &len, req);
if (status) {
goto unmap;
}
goto out;
default:
break;
}
/*
* If the last descriptor was not a Data Block, then the current
* segment must not be a Last Segment.
* If the last descriptor was not a Data Block or Bit Bucket, then the
* current segment must not be a Last Segment.
*/
if (NVME_SGL_TYPE(sgld->type) == NVME_SGL_DESCR_TYPE_LAST_SEGMENT) {
status = NVME_INVALID_SGL_SEG_DESCR | NVME_DNR;
@ -2651,7 +2671,8 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
id->nn = cpu_to_le32(n->num_namespaces);
id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
NVME_ONCS_FEATURES);
id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN);
id->sgls = cpu_to_le32(NVME_CTRL_SGLS_SUPPORT_NO_ALIGN |
NVME_CTRL_SGLS_BITBUCKET);
subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');