Return a cookie, blk_qc_t, from the blk-mq make_request functions that
allows a later caller to uniquely identify a specific IO. The cookie is
opaque to the caller, but the caller can later pass it back to the block
layer, which can then identify the hardware queue and request from it.
Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
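
For context, the cookie is a plain bit-packing of (hardware queue, tag).
Below is a minimal sketch of the encoding behind the blk_tag_to_qc_t()
calls in the hunks that follow; the shift value and the decode helpers are
assumptions modeled on the companion blk_types.h change, not part of this
hunk:

	typedef unsigned int blk_qc_t;

	#define BLK_QC_T_NONE	-1U
	#define BLK_QC_T_SHIFT	16	/* assumed split: tag in the low bits */

	/* encode a (tag, hardware queue) pair into one opaque cookie */
	static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
	{
		return tag | (queue_num << BLK_QC_T_SHIFT);
	}

	/* decode: recover the hardware queue number from a cookie */
	static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
	{
		return cookie >> BLK_QC_T_SHIFT;
	}

	/* decode: recover the tag from a cookie */
	static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
	{
		return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
	}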
-static int blk_mq_direct_issue_request(struct request *rq)
+static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
	int ret;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
			rq->mq_ctx->cpu);
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.list = NULL,
		.last = 1
	};
+	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
-	if (ret == BLK_MQ_RQ_QUEUE_OK)
+	if (ret == BLK_MQ_RQ_QUEUE_OK) {
+		*cookie = new_cookie;
		return 0;
-	else {
-		__blk_mq_requeue_request(rq);
+	}

-		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-			return 0;
-		}
-		return -1;
+	__blk_mq_requeue_request(rq);
+
+	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+		*cookie = BLK_QC_T_NONE;
+		rq->errors = -EIO;
+		blk_mq_end_request(rq, rq->errors);
+		return 0;
	}
+
+	return -1;
}
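
For clarity on the new contract: blk_mq_direct_issue_request() returns 0
when it has fully consumed the request (queued OK, in which case *cookie
identifies the IO, or errored and already completed), and -1 when the
driver was busy and the caller must fall back to normal insertion. A
minimal sketch of the calling pattern, mirroring the make_request hunk
below rather than adding anything new:

	blk_qc_t cookie = BLK_QC_T_NONE;

	if (blk_mq_direct_issue_request(rq, &cookie)) {
		/* driver busy: cookie untouched, use the normal insert path */
		blk_mq_insert_request(rq, false, true, true);
	}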
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
+	blk_qc_t cookie;
	blk_queue_bounce(q, &bio);
	if (unlikely(!rq))
		return BLK_QC_T_NONE;
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
+
	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		old_rq = rq;
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
-			return BLK_QC_T_NONE;
-		if (!blk_mq_direct_issue_request(old_rq))
-			return BLK_QC_T_NONE;
+			goto done;
+		if (!blk_mq_direct_issue_request(old_rq, &cookie))
+			goto done;
		blk_mq_insert_request(old_rq, false, true, true);
-		return BLK_QC_T_NONE;
+		goto done;
	}
	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
	blk_mq_put_ctx(data.ctx);
-	return BLK_QC_T_NONE;
+done:
+	return cookie;
}
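
To show what the cookie buys us, here is an illustrative consumer, not
part of this patch: a later caller (for instance a polling implementation
built on top of this) can decode the cookie back to the hardware queue.
The decode helper names are assumptions based on the companion
blk_types.h change:

	blk_qc_t cookie = q->make_request_fn(q, bio);

	if (cookie != BLK_QC_T_NONE) {
		/* assumed helper: cookie >> BLK_QC_T_SHIFT */
		unsigned int qnum = blk_qc_t_to_queue_num(cookie);
		struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[qnum];

		/* hctx plus blk_qc_t_to_tag(cookie) pinpoint the in-flight IO */
	}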
	unsigned int request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;
+	blk_qc_t cookie;
	blk_queue_bounce(q, &bio);
	if (unlikely(!rq))
		return BLK_QC_T_NONE;
+	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
+
	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		}
		list_add_tail(&rq->queuelist, &plug->mq_list);
		blk_mq_put_ctx(data.ctx);
-		return BLK_QC_T_NONE;
+		return cookie;
	}
	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
	blk_mq_put_ctx(data.ctx);
-	return BLK_QC_T_NONE;
+	return cookie;
}
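
Usage note for consumers: every early-exit path in both functions returns
BLK_QC_T_NONE, so callers must check for that sentinel before decoding a
cookie. A one-line validity helper along these lines (name assumed from
the companion blk_types.h change) keeps the check in one place:

	/* false if the cookie does not identify any IO */
	static inline bool blk_qc_t_valid(blk_qc_t cookie)
	{
		return cookie != BLK_QC_T_NONE;
	}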