X-Git-Url: http://git.cascardo.eti.br/?a=blobdiff_plain;f=block%2Fblk-mq.c;h=c79126e110308e8b1ea4b322506a425ceeb3085c;hb=22d3b76ed7d2a02bca513b113f2613b9b6e61d58;hp=862f458d4760340935017f61f27c59feab476119;hpb=4937e2a6f939a41bf811378e80d71f68aa0950c6;p=cascardo%2Flinux.git

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 862f458d4760..c79126e11030 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -171,9 +171,12 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-static void blk_mq_rq_ctx_init(struct blk_mq_ctx *ctx, struct request *rq,
-			       unsigned int rw_flags)
+static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
+			       struct request *rq, unsigned int rw_flags)
 {
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
+
 	rq->mq_ctx = ctx;
 	rq->cmd_flags = rw_flags;
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
@@ -197,12 +200,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 
 		rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
 		if (rq) {
-			blk_mq_rq_ctx_init(ctx, rq, rw);
-			break;
-		} else if (!(gfp & __GFP_WAIT))
+			blk_mq_rq_ctx_init(q, ctx, rq, rw);
 			break;
+		}
 
 		blk_mq_put_ctx(ctx);
+		if (!(gfp & __GFP_WAIT))
+			break;
+
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_wait_for_tags(hctx->tags);
 	} while (1);
@@ -219,7 +224,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		return NULL;
 
 	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
-	blk_mq_put_ctx(rq->mq_ctx);
+	if (rq)
+		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
 }
 
@@ -232,7 +238,8 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
 		return NULL;
 
 	rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
-	blk_mq_put_ctx(rq->mq_ctx);
+	if (rq)
+		blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
@@ -305,12 +312,12 @@ void blk_mq_complete_request(struct request *rq, int error)
 
 	blk_account_io_completion(rq, bytes);
 
+	blk_account_io_done(rq);
+
 	if (rq->end_io)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
-
-	blk_account_io_done(rq);
 }
 
 void __blk_mq_end_io(struct request *rq, int error)
@@ -718,6 +725,8 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+	trace_block_rq_insert(hctx->queue, rq);
+
 	list_add_tail(&rq->queuelist, &ctx->rq_list);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 
@@ -921,7 +930,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
-		blk_mq_rq_ctx_init(ctx, rq, rw);
+		blk_mq_rq_ctx_init(q, ctx, rq, rw);
 	else {
 		blk_mq_put_ctx(ctx);
 		trace_block_sleeprq(q, bio, rw);
@@ -1377,6 +1386,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 
 	q->queue_hw_ctx = hctxs;
 	q->mq_ops = reg->ops;
+	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
 	blk_queue_make_request(q, blk_mq_make_request);
 	blk_queue_rq_timed_out(q, reg->ops->timeout);