blk-mq: only iterate over inflight requests in blk_mq_tagset_busy_iter

We already check for started commands in all callbacks, but we should
also protect against already completed commands.  Do this by taking
the checks to common code.

Acked-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2018-05-30 18:51:00 +02:00 committed by Jens Axboe
parent 5e3c3a7ece
commit d250bf4e77
5 changed files with 4 additions and 21 deletions

View File

@@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * test and set the bit before assining ->rqs[].
 	 */
 	rq = tags->rqs[bitnr];
-	if (rq)
+	if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 		iter_data->fn(rq, iter_data->data, reserved);
 	return true;

View File

@@ -2725,15 +2725,11 @@ static void mtip_softirq_done_fn(struct request *rq)
 	blk_mq_end_request(rq, cmd->status);
 }
 
-static void mtip_abort_cmd(struct request *req, void *data,
-			   bool reserved)
+static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
 {
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 	struct driver_data *dd = data;
 
-	if (!blk_mq_request_started(req))
-		return;
-
 	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
 
 	clear_bit(req->tag, dd->port->cmds_to_issue);
@@ -2741,14 +2737,10 @@ static void mtip_abort_cmd(struct request *req, void *data,
 	mtip_softirq_done_fn(req);
 }
 
-static void mtip_queue_cmd(struct request *req, void *data,
-			   bool reserved)
+static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
 {
 	struct driver_data *dd = data;
 
-	if (!blk_mq_request_started(req))
-		return;
-
 	set_bit(req->tag, dd->port->cmds_to_issue);
 	blk_abort_request(req);
 }

View File

@@ -676,11 +676,8 @@ static void recv_work(struct work_struct *work)
 
 static void nbd_clear_req(struct request *req, void *data, bool reserved)
 {
-	struct nbd_cmd *cmd;
-
-	if (!blk_mq_request_started(req))
-		return;
-	cmd = blk_mq_rq_to_pdu(req);
+	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 	cmd->status = BLK_STS_IOERR;
 	blk_mq_complete_request(req);
 }

View File

@@ -242,9 +242,6 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_cancel_request(struct request *req, void *data, bool reserved)
 {
-	if (!blk_mq_request_started(req))
-		return;
-
 	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
 				"Cancelling I/O %d", req->tag);

View File

@@ -2393,9 +2393,6 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
 
-	if (!blk_mq_request_started(req))
-		return;
-
 	__nvme_fc_abort_op(ctrl, op);
 }