blk-mq: return tag/queue combo in the make_request_fn handlers

Return a cookie, blk_qc_t, from the blk-mq make request functions that
allows a later caller to uniquely identify a specific IO. The cookie
is opaque to the caller, but the caller can later pass it back to the
block layer, which can then identify the hardware queue and request
from that cookie.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
Author: Jens Axboe <axboe@fb.com>
Date:   2015-11-05 10:41:40 -07:00
Commit: 7b371636fb (parent: dece16353e)

1 changed file with 29 additions and 18 deletions
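
For reference, the blk_qc_t cookie format comes from the parent commit
("block: change ->make_request_fn() and users to return a queue cookie"),
which packs the hardware queue number and the driver tag into a single
32-bit word. The helpers below sketch the include/linux/blk_types.h
definitions as of this series; treat them as a reference rendition rather
than the authoritative source:

typedef unsigned int blk_qc_t;

#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

/* Encode: hardware queue index in the high bits, tag in the low bits. */
static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

Since a real submission would need both a queue index and a tag of 0xffff
to produce an all-ones value, BLK_QC_T_NONE cannot collide with a valid
tag/queue combination in practice.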

diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c

@@ -1198,7 +1198,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	return rq;
 }
 
-static int blk_mq_direct_issue_request(struct request *rq)
+static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 {
 	int ret;
 	struct request_queue *q = rq->q;
@@ -1209,6 +1209,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
 		.list = NULL,
 		.last = 1
 	};
+	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
 
 	/*
 	 * For OK queue, we are done. For error, kill it. Any other
@@ -1216,18 +1217,21 @@ static int blk_mq_direct_issue_request(struct request *rq)
 	 * would have done
 	 */
 	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK)
+	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+		*cookie = new_cookie;
 		return 0;
-	else {
-		__blk_mq_requeue_request(rq);
+	}
 
-		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-			return 0;
-		}
-		return -1;
+	__blk_mq_requeue_request(rq);
+
+	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+		*cookie = BLK_QC_T_NONE;
+		rq->errors = -EIO;
+		blk_mq_end_request(rq, rq->errors);
+		return 0;
 	}
+
+	return -1;
 }
 
 /*
@@ -1244,6 +1248,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int request_count = 0;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
+	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1265,6 +1270,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(!rq))
 		return BLK_QC_T_NONE;
 
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
+
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
 		blk_insert_flush(rq);
@@ -1302,11 +1309,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
 		if (!old_rq)
-			return BLK_QC_T_NONE;
-		if (!blk_mq_direct_issue_request(old_rq))
-			return BLK_QC_T_NONE;
+			goto done;
+		if (!blk_mq_direct_issue_request(old_rq, &cookie))
+			goto done;
 		blk_mq_insert_request(old_rq, false, true, true);
-		return BLK_QC_T_NONE;
+		goto done;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,7 +1327,8 @@ run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
-	return BLK_QC_T_NONE;
+done:
+	return cookie;
 }
 
 /*
@@ -1335,6 +1343,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	unsigned int request_count = 0;
 	struct blk_map_ctx data;
 	struct request *rq;
+	blk_qc_t cookie;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1353,6 +1362,8 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(!rq))
 		return BLK_QC_T_NONE;
 
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
+
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
 		blk_insert_flush(rq);
@@ -1375,7 +1386,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		}
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 		blk_mq_put_ctx(data.ctx);
-		return BLK_QC_T_NONE;
+		return cookie;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1390,7 +1401,7 @@ run_queue:
 	}
 
 	blk_mq_put_ctx(data.ctx);
-	return BLK_QC_T_NONE;
+	return cookie;
 }
 
 /*
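
A later caller holding the cookie can then locate the hardware queue (and,
via the tag, the request) without any other bookkeeping. Below is a minimal
sketch of the consuming side, assuming the helpers shown earlier;
blk_qc_to_hctx is a hypothetical name used for illustration, the in-tree
consumer being the I/O polling support built on top of this change:

/*
 * Hypothetical helper: map a cookie returned by the make_request
 * handlers back to the hardware queue the IO was issued on.
 */
static struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
					    blk_qc_t cookie)
{
	if (!blk_qc_t_valid(cookie))
		return NULL;

	return q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
}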