hw/block/nvme: refactor zone resource management

Zone transition handling and resource management is open coded (and
semi-duplicated in the case of open, close and finish).

In preparation for Simple Copy command support (which also needs to open
zones for writing), consolidate into a set of 'nvme_zrm' functions and
in the process fix a bug with the controller not closing an open zone to
allow another zone to be explicitly opened.

Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
This commit is contained in:
Klaus Jensen 2021-01-19 21:01:15 +01:00
parent eda688ee24
commit 975b646650

View File

@ -1292,7 +1292,46 @@ static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
return status;
}
static void nvme_auto_transition_zone(NvmeNamespace *ns)
/*
 * Transition a zone to the Full state, releasing the zone resources it
 * holds.
 *
 * A zone that is already Full is left untouched (success).  Open zones
 * give back their open resource, open and closed zones give back their
 * active resource, and Empty/Closed/Open zones are then marked Full.
 * Any other state is an invalid transition.
 */
static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone)
{
    NvmeZoneState zs = nvme_get_zone_state(zone);

    if (zs == NVME_ZONE_STATE_FULL) {
        return NVME_SUCCESS;
    }

    if (zs != NVME_ZONE_STATE_IMPLICITLY_OPEN &&
        zs != NVME_ZONE_STATE_EXPLICITLY_OPEN &&
        zs != NVME_ZONE_STATE_CLOSED &&
        zs != NVME_ZONE_STATE_EMPTY) {
        return NVME_ZONE_INVAL_TRANSITION;
    }

    /* open zones hold an open resource in addition to an active one */
    if (zs == NVME_ZONE_STATE_IMPLICITLY_OPEN ||
        zs == NVME_ZONE_STATE_EXPLICITLY_OPEN) {
        nvme_aor_dec_open(ns);
    }

    /* open and closed zones hold an active resource */
    if (zs != NVME_ZONE_STATE_EMPTY) {
        nvme_aor_dec_active(ns);
    }

    nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);

    return NVME_SUCCESS;
}
/*
 * Transition an open zone to the Closed state.
 *
 * Closing releases the zone's open resource; the active resource is
 * kept (a Closed zone remains active).  A zone that is already Closed
 * is a no-op; any other state is an invalid transition.
 *
 * Fix: the CLOSED case must come *after* the open cases so that the
 * fall through lands on `return NVME_SUCCESS` — previously the open
 * cases fell through into `default:` and reported
 * NVME_ZONE_INVAL_TRANSITION even though the zone had just been
 * successfully closed.
 */
static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
{
    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        nvme_aor_dec_open(ns);
        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
        /* fall through */
    case NVME_ZONE_STATE_CLOSED:
        return NVME_SUCCESS;
    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }
}
static void nvme_zrm_auto_transition_zone(NvmeNamespace *ns)
{
NvmeZone *zone;
@ -1304,34 +1343,74 @@ static void nvme_auto_transition_zone(NvmeNamespace *ns)
* Automatically close this implicitly open zone.
*/
QTAILQ_REMOVE(&ns->imp_open_zones, zone, entry);
nvme_aor_dec_open(ns);
nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
nvme_zrm_close(ns, zone);
}
}
}
/*
 * Open a zone for writing, either implicitly (as a side effect of a
 * write) or explicitly (Zone Management Send - Open Zone).
 *
 * Empty and Closed zones first get a chance to auto-close an
 * implicitly open zone (nvme_zrm_auto_transition_zone) to free an open
 * resource, then have the active/open resource limits checked
 * (nvme_aor_check) before the corresponding counters are bumped.  An
 * implicit open of an already (implicitly or explicitly) open zone is
 * a no-op; an explicit open promotes an implicitly open zone to
 * explicitly open.  Any other state is an invalid transition.
 *
 * Fix: stale pre-refactor lines (the old nvme_auto_open_zone signature
 * and its if/else body) were interleaved with this implementation,
 * leaving the block uncompilable; they are removed here.
 */
static uint16_t __nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone,
                                bool implicit)
{
    int act = 0;
    uint16_t status;

    switch (nvme_get_zone_state(zone)) {
    case NVME_ZONE_STATE_EMPTY:
        act = 1;

        /* fallthrough */

    case NVME_ZONE_STATE_CLOSED:
        nvme_zrm_auto_transition_zone(ns);
        status = nvme_aor_check(ns, act, 1);
        if (status) {
            return status;
        }

        if (act) {
            nvme_aor_inc_active(ns);
        }

        nvme_aor_inc_open(ns);

        if (implicit) {
            nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
            return NVME_SUCCESS;
        }

        /* fallthrough */

    case NVME_ZONE_STATE_IMPLICITLY_OPEN:
        if (implicit) {
            return NVME_SUCCESS;
        }

        nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN);

        /* fallthrough */

    case NVME_ZONE_STATE_EXPLICITLY_OPEN:
        return NVME_SUCCESS;

    default:
        return NVME_ZONE_INVAL_TRANSITION;
    }
}
static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req,
bool failed)
/* Implicitly open @zone (as a side effect of a write to it). */
static inline uint16_t nvme_zrm_auto(NvmeNamespace *ns, NvmeZone *zone)
{
    return __nvme_zrm_open(ns, zone, true);
}
/* Explicitly open @zone (Zone Management Send - Open Zone). */
static inline uint16_t nvme_zrm_open(NvmeNamespace *ns, NvmeZone *zone)
{
    return __nvme_zrm_open(ns, zone, false);
}
static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req)
{
NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
NvmeZone *zone;
NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
uint64_t slba;
uint32_t nlb;
@ -1341,47 +1420,8 @@ static void nvme_finalize_zoned_write(NvmeNamespace *ns, NvmeRequest *req,
zone->d.wp += nlb;
if (failed) {
res->slba = 0;
}
if (zone->d.wp == nvme_zone_wr_boundary(zone)) {
switch (nvme_get_zone_state(zone)) {
case NVME_ZONE_STATE_IMPLICITLY_OPEN:
case NVME_ZONE_STATE_EXPLICITLY_OPEN:
nvme_aor_dec_open(ns);
/* fall through */
case NVME_ZONE_STATE_CLOSED:
nvme_aor_dec_active(ns);
/* fall through */
case NVME_ZONE_STATE_EMPTY:
nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
/* fall through */
case NVME_ZONE_STATE_FULL:
break;
default:
assert(false);
}
}
}
static void nvme_advance_zone_wp(NvmeNamespace *ns, NvmeZone *zone,
uint32_t nlb)
{
uint8_t zs;
zone->w_ptr += nlb;
if (zone->w_ptr < nvme_zone_wr_boundary(zone)) {
zs = nvme_get_zone_state(zone);
switch (zs) {
case NVME_ZONE_STATE_EMPTY:
nvme_aor_inc_active(ns);
/* fall through */
case NVME_ZONE_STATE_CLOSED:
nvme_aor_inc_open(ns);
nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
}
nvme_zrm_finish(ns, zone);
}
}
@ -1406,7 +1446,7 @@ static void nvme_rw_cb(void *opaque, int ret)
trace_pci_nvme_rw_cb(nvme_cid(req), blk_name(blk));
if (ns->params.zoned && nvme_is_write(req)) {
nvme_finalize_zoned_write(ns, req, ret != 0);
nvme_finalize_zoned_write(ns, req);
}
if (!ret) {
@ -1782,12 +1822,12 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
goto invalid;
}
status = nvme_auto_open_zone(ns, zone);
status = nvme_zrm_auto(ns, zone);
if (status) {
goto invalid;
}
nvme_advance_zone_wp(ns, zone, nlb);
zone->w_ptr += nlb;
}
data_offset = nvme_l2b(ns, slba);
@ -1873,73 +1913,19 @@ enum NvmeZoneProcessingMask {
/*
 * Zone Management Send - Open Zone handler; delegates to the
 * consolidated zone resource management helper.
 *
 * @state and @req are unused but kept so the signature matches the
 * other zone management handlers (close/finish/reset) that are
 * dispatched together.
 *
 * Fix: removed the stale pre-refactor switch body that was interleaved
 * with the new one-line delegation, leaving the block uncompilable.
 */
static uint16_t nvme_open_zone(NvmeNamespace *ns, NvmeZone *zone,
                               NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_open(ns, zone);
}
/*
 * Zone Management Send - Close Zone handler; delegates to the
 * consolidated zone resource management helper.
 *
 * @state and @req are unused but kept so the signature matches the
 * other zone management handlers that are dispatched together.
 *
 * Fix: removed the stale pre-refactor switch body that was interleaved
 * with the new one-line delegation, leaving the block uncompilable.
 */
static uint16_t nvme_close_zone(NvmeNamespace *ns, NvmeZone *zone,
                                NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_close(ns, zone);
}
/*
 * Zone Management Send - Finish Zone handler; delegates to the
 * consolidated zone resource management helper.
 *
 * @state and @req are unused but kept so the signature matches the
 * other zone management handlers that are dispatched together.
 *
 * NOTE(review): the removed open-coded body also advanced zone->w_ptr
 * and zone->d.wp to the write boundary, which nvme_zrm_finish (as
 * shown) does not do — confirm the write-pointer update is handled
 * elsewhere in the full patch.
 *
 * Fix: removed the stale pre-refactor switch body that was interleaved
 * with the new one-line delegation, leaving the block uncompilable.
 */
static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
                                 NvmeZoneState state, NvmeRequest *req)
{
    return nvme_zrm_finish(ns, zone);
}
static uint16_t nvme_reset_zone(NvmeNamespace *ns, NvmeZone *zone,