1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23
24 #include <trace/events/block.h>
25
26 #include <linux/blk-mq.h>
27 #include "blk.h"
28 #include "blk-mq.h"
29 #include "blk-mq-tag.h"
30
31 static DEFINE_MUTEX(all_q_mutex);
32 static LIST_HEAD(all_q_list);
33
34 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
35
36 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
37                                            unsigned int cpu)
38 {
39         return per_cpu_ptr(q->queue_ctx, cpu);
40 }
41
42 /*
43  * This assumes per-cpu software queues. They could be per-node
44  * as well, for instance. For now this is hardcoded as-is. Note that we don't
45  * care about preemption, since we know the ctx's are persistent. This does
46  * mean that we can't rely on ctx always matching the currently running CPU.
47  */
48 static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
49 {
50         return __blk_mq_get_ctx(q, get_cpu());
51 }
52
53 static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
54 {
55         put_cpu();
56 }
57
58 /*
59  * Check if any of the ctx's have pending work in this hardware queue
60  */
61 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
62 {
63         unsigned int i;
64
65         for (i = 0; i < hctx->ctx_map.map_size; i++)
66                 if (hctx->ctx_map.map[i].word)
67                         return true;
68
69         return false;
70 }
71
72 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
73                                               struct blk_mq_ctx *ctx)
74 {
75         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
76 }
77
78 #define CTX_TO_BIT(hctx, ctx)   \
79         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
80
81 /*
82  * Mark this ctx as having pending work in this hardware queue
83  */
84 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
85                                      struct blk_mq_ctx *ctx)
86 {
87         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
88
89         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
90                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
91 }
92
93 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
94                                       struct blk_mq_ctx *ctx)
95 {
96         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
97
98         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
99 }
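
/*
 * Worked example of the pending-ctx bitmap used by the helpers above
 * (illustrative numbers only): blk_mq_alloc_bitmap() below sizes the map
 * with bits_per_word == 8, so a ctx with index_hw == 11 lives in
 * ctx_map.map[11 / 8] == map[1], and CTX_TO_BIT() selects bit 11 & 7 == 3
 * within that word.
 */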
100
101 static int blk_mq_queue_enter(struct request_queue *q)
102 {
103         int ret;
104
105         __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
106         smp_wmb();
107         /* the queue can't be frozen while it's still initializing, so let callers in */
108         if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
109                 return 0;
110
111         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
112
113         spin_lock_irq(q->queue_lock);
114         ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
115                 !blk_queue_bypass(q) || blk_queue_dying(q),
116                 *q->queue_lock);
117         /* increment usage with the lock held, so freeze_queue() cannot run concurrently here */
118         if (!ret && !blk_queue_dying(q))
119                 __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
120         else if (blk_queue_dying(q))
121                 ret = -ENODEV;
122         spin_unlock_irq(q->queue_lock);
123
124         return ret;
125 }
126
127 static void blk_mq_queue_exit(struct request_queue *q)
128 {
129         __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
130 }
131
132 static void __blk_mq_drain_queue(struct request_queue *q)
133 {
134         while (true) {
135                 s64 count;
136
137                 spin_lock_irq(q->queue_lock);
138                 count = percpu_counter_sum(&q->mq_usage_counter);
139                 spin_unlock_irq(q->queue_lock);
140
141                 if (count == 0)
142                         break;
143                 blk_mq_run_queues(q, false);
144                 msleep(10);
145         }
146 }
147
148 /*
149  * Guarantee no request is in use, so we can change any data structure of
150  * the queue afterward.
151  */
152 static void blk_mq_freeze_queue(struct request_queue *q)
153 {
154         bool drain;
155
156         spin_lock_irq(q->queue_lock);
157         drain = !q->bypass_depth++;
158         queue_flag_set(QUEUE_FLAG_BYPASS, q);
159         spin_unlock_irq(q->queue_lock);
160
161         if (drain)
162                 __blk_mq_drain_queue(q);
163 }
164
165 void blk_mq_drain_queue(struct request_queue *q)
166 {
167         __blk_mq_drain_queue(q);
168 }
169
170 static void blk_mq_unfreeze_queue(struct request_queue *q)
171 {
172         bool wake = false;
173
174         spin_lock_irq(q->queue_lock);
175         if (!--q->bypass_depth) {
176                 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
177                 wake = true;
178         }
179         WARN_ON_ONCE(q->bypass_depth < 0);
180         spin_unlock_irq(q->queue_lock);
181         if (wake)
182                 wake_up_all(&q->mq_freeze_wq);
183 }
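
/*
 * A minimal sketch of how the usage counter and the freeze logic above
 * pair up (hypothetical caller, not part of this file): every submission
 * path brackets its work with blk_mq_queue_enter()/blk_mq_queue_exit(),
 * so blk_mq_freeze_queue() can raise bypass_depth and then wait in
 * __blk_mq_drain_queue() until mq_usage_counter sums to zero.
 *
 *	if (blk_mq_queue_enter(q))
 *		return;				// dying or frozen
 *	...queue the request...
 *	blk_mq_queue_exit(q);		// done in __blk_mq_free_request()
 */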
184
185 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
186 {
187         return blk_mq_has_free_tags(hctx->tags);
188 }
189 EXPORT_SYMBOL(blk_mq_can_queue);
190
191 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
192                                struct request *rq, unsigned int rw_flags)
193 {
194         if (blk_queue_io_stat(q))
195                 rw_flags |= REQ_IO_STAT;
196
197         INIT_LIST_HEAD(&rq->queuelist);
198         /* csd/requeue_work/fifo_time is initialized before use */
199         rq->q = q;
200         rq->mq_ctx = ctx;
201         rq->cmd_flags |= rw_flags;
202         /* do not touch atomic flags, it needs atomic ops against the timer */
203         rq->cpu = -1;
204         INIT_HLIST_NODE(&rq->hash);
205         RB_CLEAR_NODE(&rq->rb_node);
206         rq->rq_disk = NULL;
207         rq->part = NULL;
208 #ifdef CONFIG_BLK_CGROUP
209         rq->rl = NULL;
210         set_start_time_ns(rq);
211         rq->io_start_time_ns = 0;
212 #endif
213         rq->nr_phys_segments = 0;
214 #if defined(CONFIG_BLK_DEV_INTEGRITY)
215         rq->nr_integrity_segments = 0;
216 #endif
217         rq->special = NULL;
218         /* tag was already set */
219         rq->errors = 0;
220
221         rq->extra_len = 0;
222         rq->sense_len = 0;
223         rq->resid_len = 0;
224         rq->sense = NULL;
225
226         INIT_LIST_HEAD(&rq->timeout_list);
227         rq->end_io = NULL;
228         rq->end_io_data = NULL;
229         rq->next_rq = NULL;
230
231         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
232 }
233
234 static struct request *
235 __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
236                 struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved)
237 {
238         struct request *rq;
239         unsigned int tag;
240
241         tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
242         if (tag != BLK_MQ_TAG_FAIL) {
243                 rq = hctx->tags->rqs[tag];
244
245                 rq->cmd_flags = 0;
246                 if (blk_mq_tag_busy(hctx)) {
247                         rq->cmd_flags = REQ_MQ_INFLIGHT;
248                         atomic_inc(&hctx->nr_active);
249                 }
250
251                 rq->tag = tag;
252                 blk_mq_rq_ctx_init(q, ctx, rq, rw);
253                 return rq;
254         }
255
256         return NULL;
257 }
258
259 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
260                 bool reserved)
261 {
262         struct blk_mq_ctx *ctx;
263         struct blk_mq_hw_ctx *hctx;
264         struct request *rq;
265
266         if (blk_mq_queue_enter(q))
267                 return NULL;
268
269         ctx = blk_mq_get_ctx(q);
270         hctx = q->mq_ops->map_queue(q, ctx->cpu);
271
272         rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT,
273                                     reserved);
274         if (!rq && (gfp & __GFP_WAIT)) {
275                 __blk_mq_run_hw_queue(hctx);
276                 blk_mq_put_ctx(ctx);
277
278                 ctx = blk_mq_get_ctx(q);
279                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
280                 rq =  __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved);
281         }
282         blk_mq_put_ctx(ctx);
283         return rq;
284 }
285 EXPORT_SYMBOL(blk_mq_alloc_request);
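
/*
 * Sketch of typical driver-side use of the allocator above (illustrative
 * only; my_done_fn is a made-up completion callback, and the pdu layout
 * is whatever the driver declared via cmd_size):
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (!rq)
 *		return -ENOMEM;
 *	// fill in the driver payload returned by blk_mq_rq_to_pdu(rq)
 *	blk_execute_rq_nowait(q, NULL, rq, 0, my_done_fn);
 */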
286
287 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
288                                   struct blk_mq_ctx *ctx, struct request *rq)
289 {
290         const int tag = rq->tag;
291         struct request_queue *q = rq->q;
292
293         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
294                 atomic_dec(&hctx->nr_active);
295
296         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
297         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
298         blk_mq_queue_exit(q);
299 }
300
301 void blk_mq_free_request(struct request *rq)
302 {
303         struct blk_mq_ctx *ctx = rq->mq_ctx;
304         struct blk_mq_hw_ctx *hctx;
305         struct request_queue *q = rq->q;
306
307         ctx->rq_completed[rq_is_sync(rq)]++;
308
309         hctx = q->mq_ops->map_queue(q, ctx->cpu);
310         __blk_mq_free_request(hctx, ctx, rq);
311 }
312
313 /*
314  * Clone all relevant state from a request that has been put on hold in
315  * the flush state machine into the preallocated flush request that hangs
316  * off the request queue.
317  *
318  * For a driver the flush request should be invisible, that's why we are
319  * impersonating the original request here.
320  */
321 void blk_mq_clone_flush_request(struct request *flush_rq,
322                 struct request *orig_rq)
323 {
324         struct blk_mq_hw_ctx *hctx =
325                 orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);
326
327         flush_rq->mq_ctx = orig_rq->mq_ctx;
328         flush_rq->tag = orig_rq->tag;
329         memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
330                 hctx->cmd_size);
331 }
332
333 inline void __blk_mq_end_io(struct request *rq, int error)
334 {
335         blk_account_io_done(rq);
336
337         if (rq->end_io) {
338                 rq->end_io(rq, error);
339         } else {
340                 if (unlikely(blk_bidi_rq(rq)))
341                         blk_mq_free_request(rq->next_rq);
342                 blk_mq_free_request(rq);
343         }
344 }
345 EXPORT_SYMBOL(__blk_mq_end_io);
346
347 void blk_mq_end_io(struct request *rq, int error)
348 {
349         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
350                 BUG();
351         __blk_mq_end_io(rq, error);
352 }
353 EXPORT_SYMBOL(blk_mq_end_io);
354
355 static void __blk_mq_complete_request_remote(void *data)
356 {
357         struct request *rq = data;
358
359         rq->q->softirq_done_fn(rq);
360 }
361
362 static void blk_mq_ipi_complete_request(struct request *rq)
363 {
364         struct blk_mq_ctx *ctx = rq->mq_ctx;
365         bool shared = false;
366         int cpu;
367
368         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
369                 rq->q->softirq_done_fn(rq);
370                 return;
371         }
372
373         cpu = get_cpu();
374         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
375                 shared = cpus_share_cache(cpu, ctx->cpu);
376
377         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
378                 rq->csd.func = __blk_mq_complete_request_remote;
379                 rq->csd.info = rq;
380                 rq->csd.flags = 0;
381                 smp_call_function_single_async(ctx->cpu, &rq->csd);
382         } else {
383                 rq->q->softirq_done_fn(rq);
384         }
385         put_cpu();
386 }
387
388 void __blk_mq_complete_request(struct request *rq)
389 {
390         struct request_queue *q = rq->q;
391
392         if (!q->softirq_done_fn)
393                 blk_mq_end_io(rq, rq->errors);
394         else
395                 blk_mq_ipi_complete_request(rq);
396 }
397
398 /**
399  * blk_mq_complete_request - end I/O on a request
400  * @rq:         the request being processed
401  *
402  * Description:
403  *      Ends all I/O on a request. It does not handle partial completions.
404  *      The actual completion happens out-of-order, through a IPI handler.
405  **/
406 void blk_mq_complete_request(struct request *rq)
407 {
408         struct request_queue *q = rq->q;
409
410         if (unlikely(blk_should_fake_timeout(q)))
411                 return;
412         if (!blk_mark_rq_complete(rq))
413                 __blk_mq_complete_request(rq);
414 }
415 EXPORT_SYMBOL(blk_mq_complete_request);
416
417 static void blk_mq_start_request(struct request *rq, bool last)
418 {
419         struct request_queue *q = rq->q;
420
421         trace_block_rq_issue(q, rq);
422
423         rq->resid_len = blk_rq_bytes(rq);
424         if (unlikely(blk_bidi_rq(rq)))
425                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
426
427         /*
428          * Just mark start time and set the started bit. Due to memory
429          * ordering, we know we'll see the correct deadline as long as
430          * REQ_ATOM_STARTED is seen. Use the default queue timeout,
431          * unless one has been set in the request.
432          */
433         if (!rq->timeout)
434                 rq->deadline = jiffies + q->rq_timeout;
435         else
436                 rq->deadline = jiffies + rq->timeout;
437
438         /*
439          * Mark us as started and clear complete. Complete might have been
440          * set if requeue raced with timeout, which then marked it as
441          * complete. So be sure to clear complete again when we start
442          * the request, otherwise we'll ignore the completion event.
443          */
444         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
445                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
446         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
447                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
448
449         if (q->dma_drain_size && blk_rq_bytes(rq)) {
450                 /*
451                  * Make sure space for the drain appears.  We know we can do
452                  * this because max_hw_segments has been adjusted to be one
453                  * fewer than the device can handle.
454                  */
455                 rq->nr_phys_segments++;
456         }
457
458         /*
459          * Flag the last request in the series so that drivers know when IO
460          * should be kicked off, if they don't do it on a per-request basis.
461          *
462          * Note: the flag isn't the only condition for drivers to kick off IO;
463          * if the drive is busy, the last request might not have the bit set.
464          */
465         if (last)
466                 rq->cmd_flags |= REQ_END;
467 }
468
469 static void __blk_mq_requeue_request(struct request *rq)
470 {
471         struct request_queue *q = rq->q;
472
473         trace_block_rq_requeue(q, rq);
474         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
475
476         rq->cmd_flags &= ~REQ_END;
477
478         if (q->dma_drain_size && blk_rq_bytes(rq))
479                 rq->nr_phys_segments--;
480 }
481
482 void blk_mq_requeue_request(struct request *rq)
483 {
484         __blk_mq_requeue_request(rq);
485         blk_clear_rq_complete(rq);
486
487         BUG_ON(blk_queued_rq(rq));
488         blk_mq_add_to_requeue_list(rq, true);
489 }
490 EXPORT_SYMBOL(blk_mq_requeue_request);
491
492 static void blk_mq_requeue_work(struct work_struct *work)
493 {
494         struct request_queue *q =
495                 container_of(work, struct request_queue, requeue_work);
496         LIST_HEAD(rq_list);
497         struct request *rq, *next;
498         unsigned long flags;
499
500         spin_lock_irqsave(&q->requeue_lock, flags);
501         list_splice_init(&q->requeue_list, &rq_list);
502         spin_unlock_irqrestore(&q->requeue_lock, flags);
503
504         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
505                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
506                         continue;
507
508                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
509                 list_del_init(&rq->queuelist);
510                 blk_mq_insert_request(rq, true, false, false);
511         }
512
513         while (!list_empty(&rq_list)) {
514                 rq = list_entry(rq_list.next, struct request, queuelist);
515                 list_del_init(&rq->queuelist);
516                 blk_mq_insert_request(rq, false, false, false);
517         }
518
519         blk_mq_run_queues(q, false);
520 }
521
522 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
523 {
524         struct request_queue *q = rq->q;
525         unsigned long flags;
526
527         /*
528          * We abuse this flag that is otherwise used by the I/O scheduler to
529          * request head insertion from the workqueue.
530          */
531         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
532
533         spin_lock_irqsave(&q->requeue_lock, flags);
534         if (at_head) {
535                 rq->cmd_flags |= REQ_SOFTBARRIER;
536                 list_add(&rq->queuelist, &q->requeue_list);
537         } else {
538                 list_add_tail(&rq->queuelist, &q->requeue_list);
539         }
540         spin_unlock_irqrestore(&q->requeue_lock, flags);
541 }
542 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
543
544 void blk_mq_kick_requeue_list(struct request_queue *q)
545 {
546         kblockd_schedule_work(&q->requeue_work);
547 }
548 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
549
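/*
 * Note on the flush special case below: blk_mq_clone_flush_request()
 * above makes the preallocated q->flush_rq impersonate the original
 * request, including its tag, so a lookup by tag must hand back the
 * flush request while a REQ_FLUSH_SEQ sequence owns that tag.
 */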
550 struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag)
551 {
552         struct request_queue *q = hctx->queue;
553
554         if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) &&
555             q->flush_rq->tag == tag)
556                 return q->flush_rq;
557
558         return hctx->tags->rqs[tag];
559 }
560 EXPORT_SYMBOL(blk_mq_tag_to_rq);
561
562 struct blk_mq_timeout_data {
563         struct blk_mq_hw_ctx *hctx;
564         unsigned long *next;
565         unsigned int *next_set;
566 };
567
568 static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
569 {
570         struct blk_mq_timeout_data *data = __data;
571         struct blk_mq_hw_ctx *hctx = data->hctx;
572         unsigned int tag;
573
574          /* It may not be in flight yet (this is where
575          * the REQ_ATOM_STARTED flag comes in). The requests are
576          * statically allocated, so we know it's always safe to access the
577          * memory associated with a bit offset into ->rqs[].
578          */
579         tag = 0;
580         do {
581                 struct request *rq;
582
583                 tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
584                 if (tag >= hctx->tags->nr_tags)
585                         break;
586
587                 rq = blk_mq_tag_to_rq(hctx, tag++);
588                 if (rq->q != hctx->queue)
589                         continue;
590                 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
591                         continue;
592
593                 blk_rq_check_expired(rq, data->next, data->next_set);
594         } while (1);
595 }
596
597 static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
598                                         unsigned long *next,
599                                         unsigned int *next_set)
600 {
601         struct blk_mq_timeout_data data = {
602                 .hctx           = hctx,
603                 .next           = next,
604                 .next_set       = next_set,
605         };
606
607         /*
608          * Ask the tagging code to iterate busy requests, so we can
609          * check them for timeout.
610          */
611         blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
612 }
613
614 static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
615 {
616         struct request_queue *q = rq->q;
617
618         /*
619          * We know that complete is set at this point. If STARTED isn't set
620          * anymore, then the request isn't active and the "timeout" should
621          * just be ignored. This can happen due to the bitflag ordering.
622          * Timeout first checks if STARTED is set, and if it is, assumes
623          * the request is active. But if we race with completion, then
624          * both flags will get cleared. So check here again, and ignore
625          * a timeout event with a request that isn't active.
626          */
627         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
628                 return BLK_EH_NOT_HANDLED;
629
630         if (!q->mq_ops->timeout)
631                 return BLK_EH_RESET_TIMER;
632
633         return q->mq_ops->timeout(rq);
634 }
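
/*
 * Sketch of the ->timeout contract the function above relies on
 * (hypothetical driver; my_device_* and my_abort are made-up names):
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq)
 *	{
 *		if (my_device_still_working_on(rq))
 *			return BLK_EH_RESET_TIMER;	// re-arm the timer
 *		my_abort(rq);
 *		return BLK_EH_HANDLED;			// completion will follow
 *	}
 */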
635
636 static void blk_mq_rq_timer(unsigned long data)
637 {
638         struct request_queue *q = (struct request_queue *) data;
639         struct blk_mq_hw_ctx *hctx;
640         unsigned long next = 0;
641         int i, next_set = 0;
642
643         queue_for_each_hw_ctx(q, hctx, i) {
644                 /*
645                  * If no software queues are currently mapped to this
646                  * hardware queue, there's nothing to check
647                  */
648                 if (!hctx->nr_ctx || !hctx->tags)
649                         continue;
650
651                 blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
652         }
653
654         if (next_set) {
655                 next = blk_rq_timeout(round_jiffies_up(next));
656                 mod_timer(&q->timeout, next);
657         } else {
658                 queue_for_each_hw_ctx(q, hctx, i)
659                         blk_mq_tag_idle(hctx);
660         }
661 }
662
663 /*
664  * Reverse check our software queue for entries that we could potentially
665  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
666  * too much time checking for merges.
667  */
668 static bool blk_mq_attempt_merge(struct request_queue *q,
669                                  struct blk_mq_ctx *ctx, struct bio *bio)
670 {
671         struct request *rq;
672         int checked = 8;
673
674         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
675                 int el_ret;
676
677                 if (!checked--)
678                         break;
679
680                 if (!blk_rq_merge_ok(rq, bio))
681                         continue;
682
683                 el_ret = blk_try_merge(rq, bio);
684                 if (el_ret == ELEVATOR_BACK_MERGE) {
685                         if (bio_attempt_back_merge(q, rq, bio)) {
686                                 ctx->rq_merged++;
687                                 return true;
688                         }
689                         break;
690                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
691                         if (bio_attempt_front_merge(q, rq, bio)) {
692                                 ctx->rq_merged++;
693                                 return true;
694                         }
695                         break;
696                 }
697         }
698
699         return false;
700 }
701
702 /*
703  * Process software queues that have been marked busy, splicing them
704  * to the for-dispatch list.
705  */
706 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
707 {
708         struct blk_mq_ctx *ctx;
709         int i;
710
711         for (i = 0; i < hctx->ctx_map.map_size; i++) {
712                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
713                 unsigned int off, bit;
714
715                 if (!bm->word)
716                         continue;
717
718                 bit = 0;
719                 off = i * hctx->ctx_map.bits_per_word;
720                 do {
721                         bit = find_next_bit(&bm->word, bm->depth, bit);
722                         if (bit >= bm->depth)
723                                 break;
724
725                         ctx = hctx->ctxs[bit + off];
726                         clear_bit(bit, &bm->word);
727                         spin_lock(&ctx->lock);
728                         list_splice_tail_init(&ctx->rq_list, list);
729                         spin_unlock(&ctx->lock);
730
731                         bit++;
732                 } while (1);
733         }
734 }
735
736 /*
737  * Run this hardware queue, pulling any software queues mapped to it in.
738  * Note that this function currently has various problems around ordering
739  * of IO. In particular, we'd like FIFO behaviour on handling existing
740  * items on the hctx->dispatch list. Ignore that for now.
741  */
742 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
743 {
744         struct request_queue *q = hctx->queue;
745         struct request *rq;
746         LIST_HEAD(rq_list);
747         int queued;
748
749         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
750
751         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
752                 return;
753
754         hctx->run++;
755
756         /*
757          * Touch any software queue that has pending entries.
758          */
759         flush_busy_ctxs(hctx, &rq_list);
760
761         /*
762          * If we have previous entries on our dispatch list, grab them
763          * and stuff them at the front for more fair dispatch.
764          */
765         if (!list_empty_careful(&hctx->dispatch)) {
766                 spin_lock(&hctx->lock);
767                 if (!list_empty(&hctx->dispatch))
768                         list_splice_init(&hctx->dispatch, &rq_list);
769                 spin_unlock(&hctx->lock);
770         }
771
772         /*
773          * Now process all the entries, sending them to the driver.
774          */
775         queued = 0;
776         while (!list_empty(&rq_list)) {
777                 int ret;
778
779                 rq = list_first_entry(&rq_list, struct request, queuelist);
780                 list_del_init(&rq->queuelist);
781
782                 blk_mq_start_request(rq, list_empty(&rq_list));
783
784                 ret = q->mq_ops->queue_rq(hctx, rq);
785                 switch (ret) {
786                 case BLK_MQ_RQ_QUEUE_OK:
787                         queued++;
788                         continue;
789                 case BLK_MQ_RQ_QUEUE_BUSY:
790                         list_add(&rq->queuelist, &rq_list);
791                         __blk_mq_requeue_request(rq);
792                         break;
793                 default:
794                         pr_err("blk-mq: bad return on queue: %d\n", ret);
795                 case BLK_MQ_RQ_QUEUE_ERROR:
796                         rq->errors = -EIO;
797                         blk_mq_end_io(rq, rq->errors);
798                         break;
799                 }
800
801                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
802                         break;
803         }
804
805         if (!queued)
806                 hctx->dispatched[0]++;
807         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
808                 hctx->dispatched[ilog2(queued) + 1]++;
809
810         /*
811          * Any items that need requeuing? Stuff them into hctx->dispatch,
812          * that is where we will continue on next queue run.
813          */
814         if (!list_empty(&rq_list)) {
815                 spin_lock(&hctx->lock);
816                 list_splice(&rq_list, &hctx->dispatch);
817                 spin_unlock(&hctx->lock);
818         }
819 }
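
/*
 * Sketch of the ->queue_rq() contract the dispatch loop above depends on
 * (hypothetical driver; my_device_* are made-up helpers):
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		if (!my_device_has_room(hctx->driver_data))
 *			return BLK_MQ_RQ_QUEUE_BUSY;	// re-queued to hctx->dispatch
 *		if (my_device_submit(hctx->driver_data, rq))
 *			return BLK_MQ_RQ_QUEUE_ERROR;	// completed with -EIO
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */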
820
821 /*
822  * It'd be great if the workqueue API had a way to pass
823  * in a mask and had some smarts for more clever placement.
824  * For now we just round-robin here, switching for every
825  * BLK_MQ_CPU_WORK_BATCH queued items.
826  */
827 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
828 {
829         int cpu = hctx->next_cpu;
830
831         if (--hctx->next_cpu_batch <= 0) {
832                 int next_cpu;
833
834                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
835                 if (next_cpu >= nr_cpu_ids)
836                         next_cpu = cpumask_first(hctx->cpumask);
837
838                 hctx->next_cpu = next_cpu;
839                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
840         }
841
842         return cpu;
843 }
844
845 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
846 {
847         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
848                 return;
849
850         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
851                 __blk_mq_run_hw_queue(hctx);
852         else if (hctx->queue->nr_hw_queues == 1)
853                 kblockd_schedule_delayed_work(&hctx->run_work, 0);
854         else {
855                 unsigned int cpu;
856
857                 cpu = blk_mq_hctx_next_cpu(hctx);
858                 kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
859         }
860 }
861
862 void blk_mq_run_queues(struct request_queue *q, bool async)
863 {
864         struct blk_mq_hw_ctx *hctx;
865         int i;
866
867         queue_for_each_hw_ctx(q, hctx, i) {
868                 if ((!blk_mq_hctx_has_pending(hctx) &&
869                     list_empty_careful(&hctx->dispatch)) ||
870                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
871                         continue;
872
873                 preempt_disable();
874                 blk_mq_run_hw_queue(hctx, async);
875                 preempt_enable();
876         }
877 }
878 EXPORT_SYMBOL(blk_mq_run_queues);
879
880 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
881 {
882         cancel_delayed_work(&hctx->run_work);
883         cancel_delayed_work(&hctx->delay_work);
884         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
885 }
886 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
887
888 void blk_mq_stop_hw_queues(struct request_queue *q)
889 {
890         struct blk_mq_hw_ctx *hctx;
891         int i;
892
893         queue_for_each_hw_ctx(q, hctx, i)
894                 blk_mq_stop_hw_queue(hctx);
895 }
896 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
897
898 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
899 {
900         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
901
902         preempt_disable();
903         __blk_mq_run_hw_queue(hctx);
904         preempt_enable();
905 }
906 EXPORT_SYMBOL(blk_mq_start_hw_queue);
907
908 void blk_mq_start_hw_queues(struct request_queue *q)
909 {
910         struct blk_mq_hw_ctx *hctx;
911         int i;
912
913         queue_for_each_hw_ctx(q, hctx, i)
914                 blk_mq_start_hw_queue(hctx);
915 }
916 EXPORT_SYMBOL(blk_mq_start_hw_queues);
917
918
919 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
920 {
921         struct blk_mq_hw_ctx *hctx;
922         int i;
923
924         queue_for_each_hw_ctx(q, hctx, i) {
925                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
926                         continue;
927
928                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
929                 preempt_disable();
930                 blk_mq_run_hw_queue(hctx, async);
931                 preempt_enable();
932         }
933 }
934 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
935
936 static void blk_mq_run_work_fn(struct work_struct *work)
937 {
938         struct blk_mq_hw_ctx *hctx;
939
940         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
941
942         __blk_mq_run_hw_queue(hctx);
943 }
944
945 static void blk_mq_delay_work_fn(struct work_struct *work)
946 {
947         struct blk_mq_hw_ctx *hctx;
948
949         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
950
951         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
952                 __blk_mq_run_hw_queue(hctx);
953 }
954
955 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
956 {
957         unsigned long tmo = msecs_to_jiffies(msecs);
958
959         if (hctx->queue->nr_hw_queues == 1)
960                 kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
961         else {
962                 unsigned int cpu;
963
964                 cpu = blk_mq_hctx_next_cpu(hctx);
965                 kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
966         }
967 }
968 EXPORT_SYMBOL(blk_mq_delay_queue);
969
970 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
971                                     struct request *rq, bool at_head)
972 {
973         struct blk_mq_ctx *ctx = rq->mq_ctx;
974
975         trace_block_rq_insert(hctx->queue, rq);
976
977         if (at_head)
978                 list_add(&rq->queuelist, &ctx->rq_list);
979         else
980                 list_add_tail(&rq->queuelist, &ctx->rq_list);
981
982         blk_mq_hctx_mark_pending(hctx, ctx);
983
984         /*
985          * We do this early, to ensure we are on the right CPU.
986          */
987         blk_add_timer(rq);
988 }
989
990 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
991                 bool async)
992 {
993         struct request_queue *q = rq->q;
994         struct blk_mq_hw_ctx *hctx;
995         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
996
997         current_ctx = blk_mq_get_ctx(q);
998         if (!cpu_online(ctx->cpu))
999                 rq->mq_ctx = ctx = current_ctx;
1000
1001         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1002
1003         if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
1004             !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
1005                 blk_insert_flush(rq);
1006         } else {
1007                 spin_lock(&ctx->lock);
1008                 __blk_mq_insert_request(hctx, rq, at_head);
1009                 spin_unlock(&ctx->lock);
1010         }
1011
1012         if (run_queue)
1013                 blk_mq_run_hw_queue(hctx, async);
1014
1015         blk_mq_put_ctx(current_ctx);
1016 }
1017
1018 static void blk_mq_insert_requests(struct request_queue *q,
1019                                      struct blk_mq_ctx *ctx,
1020                                      struct list_head *list,
1021                                      int depth,
1022                                      bool from_schedule)
1023
1024 {
1025         struct blk_mq_hw_ctx *hctx;
1026         struct blk_mq_ctx *current_ctx;
1027
1028         trace_block_unplug(q, depth, !from_schedule);
1029
1030         current_ctx = blk_mq_get_ctx(q);
1031
1032         if (!cpu_online(ctx->cpu))
1033                 ctx = current_ctx;
1034         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1035
1036         /*
1037          * preemption doesn't flush the plug list, so it's possible that
1038          * ctx->cpu is offline by now
1039          */
1040         spin_lock(&ctx->lock);
1041         while (!list_empty(list)) {
1042                 struct request *rq;
1043
1044                 rq = list_first_entry(list, struct request, queuelist);
1045                 list_del_init(&rq->queuelist);
1046                 rq->mq_ctx = ctx;
1047                 __blk_mq_insert_request(hctx, rq, false);
1048         }
1049         spin_unlock(&ctx->lock);
1050
1051         blk_mq_run_hw_queue(hctx, from_schedule);
1052         blk_mq_put_ctx(current_ctx);
1053 }
1054
1055 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1056 {
1057         struct request *rqa = container_of(a, struct request, queuelist);
1058         struct request *rqb = container_of(b, struct request, queuelist);
1059
1060         return !(rqa->mq_ctx < rqb->mq_ctx ||
1061                  (rqa->mq_ctx == rqb->mq_ctx &&
1062                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1063 }
1064
1065 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1066 {
1067         struct blk_mq_ctx *this_ctx;
1068         struct request_queue *this_q;
1069         struct request *rq;
1070         LIST_HEAD(list);
1071         LIST_HEAD(ctx_list);
1072         unsigned int depth;
1073
1074         list_splice_init(&plug->mq_list, &list);
1075
1076         list_sort(NULL, &list, plug_ctx_cmp);
1077
1078         this_q = NULL;
1079         this_ctx = NULL;
1080         depth = 0;
1081
1082         while (!list_empty(&list)) {
1083                 rq = list_entry_rq(list.next);
1084                 list_del_init(&rq->queuelist);
1085                 BUG_ON(!rq->q);
1086                 if (rq->mq_ctx != this_ctx) {
1087                         if (this_ctx) {
1088                                 blk_mq_insert_requests(this_q, this_ctx,
1089                                                         &ctx_list, depth,
1090                                                         from_schedule);
1091                         }
1092
1093                         this_ctx = rq->mq_ctx;
1094                         this_q = rq->q;
1095                         depth = 0;
1096                 }
1097
1098                 depth++;
1099                 list_add_tail(&rq->queuelist, &ctx_list);
1100         }
1101
1102         /*
1103          * If 'this_ctx' is set, we know we have entries to complete
1104          * on 'ctx_list'. Do those.
1105          */
1106         if (this_ctx) {
1107                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1108                                        from_schedule);
1109         }
1110 }
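
/*
 * The list_sort() above groups the plugged requests by software queue
 * (and by sector within a queue), so each call to blk_mq_insert_requests()
 * takes ctx->lock once for a whole batch instead of once per request.
 */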
1111
1112 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1113 {
1114         init_request_from_bio(rq, bio);
1115
1116         if (blk_do_io_stat(rq)) {
1117                 rq->start_time = jiffies;
1118                 blk_account_io_start(rq, 1);
1119         }
1120 }
1121
1122 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1123                                          struct blk_mq_ctx *ctx,
1124                                          struct request *rq, struct bio *bio)
1125 {
1126         struct request_queue *q = hctx->queue;
1127
1128         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
1129                 blk_mq_bio_to_request(rq, bio);
1130                 spin_lock(&ctx->lock);
1131 insert_rq:
1132                 __blk_mq_insert_request(hctx, rq, false);
1133                 spin_unlock(&ctx->lock);
1134                 return false;
1135         } else {
1136                 spin_lock(&ctx->lock);
1137                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1138                         blk_mq_bio_to_request(rq, bio);
1139                         goto insert_rq;
1140                 }
1141
1142                 spin_unlock(&ctx->lock);
1143                 __blk_mq_free_request(hctx, ctx, rq);
1144                 return true;
1145         }
1146 }
1147
1148 struct blk_map_ctx {
1149         struct blk_mq_hw_ctx *hctx;
1150         struct blk_mq_ctx *ctx;
1151 };
1152
1153 static struct request *blk_mq_map_request(struct request_queue *q,
1154                                           struct bio *bio,
1155                                           struct blk_map_ctx *data)
1156 {
1157         struct blk_mq_hw_ctx *hctx;
1158         struct blk_mq_ctx *ctx;
1159         struct request *rq;
1160         int rw = bio_data_dir(bio);
1161
1162         if (unlikely(blk_mq_queue_enter(q))) {
1163                 bio_endio(bio, -EIO);
1164                 return NULL;
1165         }
1166
1167         ctx = blk_mq_get_ctx(q);
1168         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1169
1170         if (rw_is_sync(bio->bi_rw))
1171                 rw |= REQ_SYNC;
1172
1173         trace_block_getrq(q, bio, rw);
1174         rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
1175         if (unlikely(!rq)) {
1176                 __blk_mq_run_hw_queue(hctx);
1177                 blk_mq_put_ctx(ctx);
1178                 trace_block_sleeprq(q, bio, rw);
1179
1180                 ctx = blk_mq_get_ctx(q);
1181                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1182                 rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
1183                                             __GFP_WAIT|GFP_ATOMIC, false);
1184         }
1185
1186         hctx->queued++;
1187         data->hctx = hctx;
1188         data->ctx = ctx;
1189         return rq;
1190 }
1191
1192 /*
1193  * Multiple hardware queue variant. This will not use per-process plugs,
1194  * but will attempt to bypass the hctx queueing if we can go straight to
1195  * hardware for SYNC IO.
1196  */
1197 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1198 {
1199         const int is_sync = rw_is_sync(bio->bi_rw);
1200         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1201         struct blk_map_ctx data;
1202         struct request *rq;
1203
1204         blk_queue_bounce(q, &bio);
1205
1206         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1207                 bio_endio(bio, -EIO);
1208                 return;
1209         }
1210
1211         rq = blk_mq_map_request(q, bio, &data);
1212         if (unlikely(!rq))
1213                 return;
1214
1215         if (unlikely(is_flush_fua)) {
1216                 blk_mq_bio_to_request(rq, bio);
1217                 blk_insert_flush(rq);
1218                 goto run_queue;
1219         }
1220
1221         if (is_sync) {
1222                 int ret;
1223
1224                 blk_mq_bio_to_request(rq, bio);
1225                 blk_mq_start_request(rq, true);
1226                 blk_add_timer(rq);
1227
1228                 /*
1229                  * If queueing succeeded, we are done. On error, kill the
1230                  * request. For any other return (busy), just add it to our
1231                  * list as we previously would have done.
1232                  */
1233                 ret = q->mq_ops->queue_rq(data.hctx, rq);
1234                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1235                         goto done;
1236                 else {
1237                         __blk_mq_requeue_request(rq);
1238
1239                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1240                                 rq->errors = -EIO;
1241                                 blk_mq_end_io(rq, rq->errors);
1242                                 goto done;
1243                         }
1244                 }
1245         }
1246
1247         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1248                 /*
1249                  * For a SYNC request, send it to the hardware immediately. For
1250                  * an ASYNC request, just ensure that we run it later on. The
1251                  * latter allows for merging opportunities and more efficient
1252                  * dispatching.
1253                  */
1254 run_queue:
1255                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1256         }
1257 done:
1258         blk_mq_put_ctx(data.ctx);
1259 }
1260
1261 /*
1262  * Single hardware queue variant. This will attempt to use any per-process
1263  * plug for merging and IO deferral.
1264  */
1265 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1266 {
1267         const int is_sync = rw_is_sync(bio->bi_rw);
1268         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1269         unsigned int use_plug, request_count = 0;
1270         struct blk_map_ctx data;
1271         struct request *rq;
1272
1273         /*
1274          * Use a per-process plug only for async, non-flush IO; sync and
1275          * flush/FUA requests are sent to the hardware queue right away.
1276          */
1277         use_plug = !is_flush_fua && !is_sync;
1278
1279         blk_queue_bounce(q, &bio);
1280
1281         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1282                 bio_endio(bio, -EIO);
1283                 return;
1284         }
1285
1286         if (use_plug && !blk_queue_nomerges(q) &&
1287             blk_attempt_plug_merge(q, bio, &request_count))
1288                 return;
1289
1290         rq = blk_mq_map_request(q, bio, &data);
1291
1292         if (unlikely(is_flush_fua)) {
1293                 blk_mq_bio_to_request(rq, bio);
1294                 blk_insert_flush(rq);
1295                 goto run_queue;
1296         }
1297
1298         /*
1299          * A task plug may exist. Since this is completely lockless,
1300          * utilize that to temporarily store requests until the task is
1301          * either done or scheduled away.
1302          */
1303         if (use_plug) {
1304                 struct blk_plug *plug = current->plug;
1305
1306                 if (plug) {
1307                         blk_mq_bio_to_request(rq, bio);
1308                         if (list_empty(&plug->mq_list))
1309                                 trace_block_plug(q);
1310                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1311                                 blk_flush_plug_list(plug, false);
1312                                 trace_block_plug(q);
1313                         }
1314                         list_add_tail(&rq->queuelist, &plug->mq_list);
1315                         blk_mq_put_ctx(data.ctx);
1316                         return;
1317                 }
1318         }
1319
1320         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1321                 /*
1322                  * For a SYNC request, send it to the hardware immediately. For
1323                  * an ASYNC request, just ensure that we run it later on. The
1324                  * latter allows for merging opportunities and more efficient
1325                  * dispatching.
1326                  */
1327 run_queue:
1328                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1329         }
1330
1331         blk_mq_put_ctx(data.ctx);
1332 }
1333
1334 /*
1335  * Default mapping to a software queue, since we use one per CPU.
1336  */
1337 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1338 {
1339         return q->queue_hw_ctx[q->mq_map[cpu]];
1340 }
1341 EXPORT_SYMBOL(blk_mq_map_queue);
1342
1343 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1344                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1345 {
1346         struct page *page;
1347
1348         if (tags->rqs && set->ops->exit_request) {
1349                 int i;
1350
1351                 for (i = 0; i < tags->nr_tags; i++) {
1352                         if (!tags->rqs[i])
1353                                 continue;
1354                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1355                                                 hctx_idx, i);
1356                 }
1357         }
1358
1359         while (!list_empty(&tags->page_list)) {
1360                 page = list_first_entry(&tags->page_list, struct page, lru);
1361                 list_del_init(&page->lru);
1362                 __free_pages(page, page->private);
1363         }
1364
1365         kfree(tags->rqs);
1366
1367         blk_mq_free_tags(tags);
1368 }
1369
1370 static size_t order_to_size(unsigned int order)
1371 {
1372         return (size_t)PAGE_SIZE << order;
1373 }
1374
1375 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1376                 unsigned int hctx_idx)
1377 {
1378         struct blk_mq_tags *tags;
1379         unsigned int i, j, entries_per_page, max_order = 4;
1380         size_t rq_size, left;
1381
1382         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1383                                 set->numa_node);
1384         if (!tags)
1385                 return NULL;
1386
1387         INIT_LIST_HEAD(&tags->page_list);
1388
1389         tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
1390                                         GFP_KERNEL, set->numa_node);
1391         if (!tags->rqs) {
1392                 blk_mq_free_tags(tags);
1393                 return NULL;
1394         }
1395
1396         /*
1397          * rq_size is the size of the request plus driver payload, rounded
1398          * to the cacheline size
1399          */
1400         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1401                                 cache_line_size());
1402         left = rq_size * set->queue_depth;
1403
1404         for (i = 0; i < set->queue_depth; ) {
1405                 int this_order = max_order;
1406                 struct page *page;
1407                 int to_do;
1408                 void *p;
1409
1410                 while (left < order_to_size(this_order - 1) && this_order)
1411                         this_order--;
1412
1413                 do {
1414                         page = alloc_pages_node(set->numa_node, GFP_KERNEL,
1415                                                 this_order);
1416                         if (page)
1417                                 break;
1418                         if (!this_order--)
1419                                 break;
1420                         if (order_to_size(this_order) < rq_size)
1421                                 break;
1422                 } while (1);
1423
1424                 if (!page)
1425                         goto fail;
1426
1427                 page->private = this_order;
1428                 list_add_tail(&page->lru, &tags->page_list);
1429
1430                 p = page_address(page);
1431                 entries_per_page = order_to_size(this_order) / rq_size;
1432                 to_do = min(entries_per_page, set->queue_depth - i);
1433                 left -= to_do * rq_size;
1434                 for (j = 0; j < to_do; j++) {
1435                         tags->rqs[i] = p;
1436                         if (set->ops->init_request) {
1437                                 if (set->ops->init_request(set->driver_data,
1438                                                 tags->rqs[i], hctx_idx, i,
1439                                                 set->numa_node))
1440                                         goto fail;
1441                         }
1442
1443                         p += rq_size;
1444                         i++;
1445                 }
1446         }
1447
1448         return tags;
1449
1450 fail:
1451         pr_warn("%s: failed to allocate requests\n", __func__);
1452         blk_mq_free_rq_map(set, tags, hctx_idx);
1453         return NULL;
1454 }
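
/*
 * Worked example of the sizing done above (illustrative numbers only):
 * with cmd_size == 64 and a 64-byte cache line, rq_size is
 * sizeof(struct request) + 64 rounded up to a multiple of 64; an order-4
 * allocation is 16 pages (65536 bytes with 4 KB pages), which yields
 * entries_per_page == 65536 / rq_size requests, and the allocator falls
 * back to smaller orders when a large block isn't available.
 */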
1455
1456 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1457 {
1458         kfree(bitmap->map);
1459 }
1460
1461 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1462 {
1463         unsigned int bpw = 8, total, num_maps, i;
1464
1465         bitmap->bits_per_word = bpw;
1466
1467         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1468         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1469                                         GFP_KERNEL, node);
1470         if (!bitmap->map)
1471                 return -ENOMEM;
1472
1473         bitmap->map_size = num_maps;
1474
1475         total = nr_cpu_ids;
1476         for (i = 0; i < num_maps; i++) {
1477                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1478                 total -= bitmap->map[i].depth;
1479         }
1480
1481         return 0;
1482 }
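
/*
 * Worked example of the map layout above (illustrative numbers only):
 * with nr_cpu_ids == 12 and bits_per_word == 8, num_maps is 2 and the
 * per-word depths come out as 8 and 4, so the unused high bits of the
 * last word are never scanned as pending.
 */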
1483
1484 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1485 {
1486         struct request_queue *q = hctx->queue;
1487         struct blk_mq_ctx *ctx;
1488         LIST_HEAD(tmp);
1489
1490         /*
1491          * Move ctx entries to new CPU, if this one is going away.
1492          */
1493         ctx = __blk_mq_get_ctx(q, cpu);
1494
1495         spin_lock(&ctx->lock);
1496         if (!list_empty(&ctx->rq_list)) {
1497                 list_splice_init(&ctx->rq_list, &tmp);
1498                 blk_mq_hctx_clear_pending(hctx, ctx);
1499         }
1500         spin_unlock(&ctx->lock);
1501
1502         if (list_empty(&tmp))
1503                 return NOTIFY_OK;
1504
1505         ctx = blk_mq_get_ctx(q);
1506         spin_lock(&ctx->lock);
1507
1508         while (!list_empty(&tmp)) {
1509                 struct request *rq;
1510
1511                 rq = list_first_entry(&tmp, struct request, queuelist);
1512                 rq->mq_ctx = ctx;
1513                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1514         }
1515
1516         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1517         blk_mq_hctx_mark_pending(hctx, ctx);
1518
1519         spin_unlock(&ctx->lock);
1520
1521         blk_mq_run_hw_queue(hctx, true);
1522         blk_mq_put_ctx(ctx);
1523         return NOTIFY_OK;
1524 }
1525
1526 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1527 {
1528         struct request_queue *q = hctx->queue;
1529         struct blk_mq_tag_set *set = q->tag_set;
1530
1531         if (set->tags[hctx->queue_num])
1532                 return NOTIFY_OK;
1533
1534         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1535         if (!set->tags[hctx->queue_num])
1536                 return NOTIFY_STOP;
1537
1538         hctx->tags = set->tags[hctx->queue_num];
1539         return NOTIFY_OK;
1540 }
1541
1542 static int blk_mq_hctx_notify(void *data, unsigned long action,
1543                               unsigned int cpu)
1544 {
1545         struct blk_mq_hw_ctx *hctx = data;
1546
1547         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1548                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1549         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1550                 return blk_mq_hctx_cpu_online(hctx, cpu);
1551
1552         return NOTIFY_OK;
1553 }
1554
1555 static void blk_mq_exit_hw_queues(struct request_queue *q,
1556                 struct blk_mq_tag_set *set, int nr_queue)
1557 {
1558         struct blk_mq_hw_ctx *hctx;
1559         unsigned int i;
1560
1561         queue_for_each_hw_ctx(q, hctx, i) {
1562                 if (i == nr_queue)
1563                         break;
1564
1565                 if (set->ops->exit_hctx)
1566                         set->ops->exit_hctx(hctx, i);
1567
1568                 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1569                 kfree(hctx->ctxs);
1570                 blk_mq_free_bitmap(&hctx->ctx_map);
1571         }
1572
1573 }
1574
1575 static void blk_mq_free_hw_queues(struct request_queue *q,
1576                 struct blk_mq_tag_set *set)
1577 {
1578         struct blk_mq_hw_ctx *hctx;
1579         unsigned int i;
1580
1581         queue_for_each_hw_ctx(q, hctx, i) {
1582                 free_cpumask_var(hctx->cpumask);
1583                 kfree(hctx);
1584         }
1585 }
1586
1587 static int blk_mq_init_hw_queues(struct request_queue *q,
1588                 struct blk_mq_tag_set *set)
1589 {
1590         struct blk_mq_hw_ctx *hctx;
1591         unsigned int i;
1592
1593         /*
1594          * Initialize hardware queues
1595          */
1596         queue_for_each_hw_ctx(q, hctx, i) {
1597                 int node;
1598
1599                 node = hctx->numa_node;
1600                 if (node == NUMA_NO_NODE)
1601                         node = hctx->numa_node = set->numa_node;
1602
1603                 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1604                 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1605                 spin_lock_init(&hctx->lock);
1606                 INIT_LIST_HEAD(&hctx->dispatch);
1607                 hctx->queue = q;
1608                 hctx->queue_num = i;
1609                 hctx->flags = set->flags;
1610                 hctx->cmd_size = set->cmd_size;
1611
1612                 blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1613                                                 blk_mq_hctx_notify, hctx);
1614                 blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1615
1616                 hctx->tags = set->tags[i];
1617
1618                 /*
1619                  * Allocate space for all possible CPUs to avoid allocation at
1620                  * runtime.
1621                  */
1622                 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1623                                                 GFP_KERNEL, node);
1624                 if (!hctx->ctxs)
1625                         break;
1626
1627                 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1628                         break;
1629
1630                 hctx->nr_ctx = 0;
1631
1632                 if (set->ops->init_hctx &&
1633                     set->ops->init_hctx(hctx, set->driver_data, i))
1634                         break;
1635         }
1636
1637         if (i == q->nr_hw_queues)
1638                 return 0;
1639
1640         /*
1641          * Init failed
1642          */
1643         blk_mq_exit_hw_queues(q, set, i);
1644
1645         return 1;
1646 }
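/*
 * Illustrative sketch (not part of this file): the init_hctx/exit_hctx hooks
 * invoked above let a driver attach per-hardware-queue state to
 * hctx->driver_data.  The foo_* names are assumptions, not a real driver;
 * "data" is whatever the driver stored in set->driver_data.
 *
 *	static int foo_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				 unsigned int hctx_idx)
 *	{
 *		struct foo_dev *foo = data;
 *
 *		hctx->driver_data = &foo->hw_queues[hctx_idx];
 *		return 0;
 *	}
 *
 *	static void foo_exit_hctx(struct blk_mq_hw_ctx *hctx,
 *				  unsigned int hctx_idx)
 *	{
 *		hctx->driver_data = NULL;
 *	}
 */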
1647
1648 static void blk_mq_init_cpu_queues(struct request_queue *q,
1649                                    unsigned int nr_hw_queues)
1650 {
1651         unsigned int i;
1652
1653         for_each_possible_cpu(i) {
1654                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1655                 struct blk_mq_hw_ctx *hctx;
1656
1657                 memset(__ctx, 0, sizeof(*__ctx));
1658                 __ctx->cpu = i;
1659                 spin_lock_init(&__ctx->lock);
1660                 INIT_LIST_HEAD(&__ctx->rq_list);
1661                 __ctx->queue = q;
1662
1663                 /* If the CPU isn't online, it is mapped to the first hctx */
1664                 if (!cpu_online(i))
1665                         continue;
1666
1667                 hctx = q->mq_ops->map_queue(q, i);
1668                 cpumask_set_cpu(i, hctx->cpumask);
1669                 hctx->nr_ctx++;
1670
1671                 /*
1672                  * Set local node, IFF we have more than one hw queue. If
1673                  * not, we remain on the home node of the device
1674                  */
1675                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1676                         hctx->numa_node = cpu_to_node(i);
1677         }
1678 }
1679
1680 static void blk_mq_map_swqueue(struct request_queue *q)
1681 {
1682         unsigned int i;
1683         struct blk_mq_hw_ctx *hctx;
1684         struct blk_mq_ctx *ctx;
1685
1686         queue_for_each_hw_ctx(q, hctx, i) {
1687                 cpumask_clear(hctx->cpumask);
1688                 hctx->nr_ctx = 0;
1689         }
1690
1691         /*
1692          * Map software to hardware queues
1693          */
1694         queue_for_each_ctx(q, ctx, i) {
1695                 /* If the CPU isn't online, it is mapped to the first hctx */
1696                 if (!cpu_online(i))
1697                         continue;
1698
1699                 hctx = q->mq_ops->map_queue(q, i);
1700                 cpumask_set_cpu(i, hctx->cpumask);
1701                 ctx->index_hw = hctx->nr_ctx;
1702                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1703         }
1704
1705         queue_for_each_hw_ctx(q, hctx, i) {
1706                 /*
1707                  * If no software queues are mapped to this hardware queue,
1708                  * disable it and free the request entries.
1709                  */
1710                 if (!hctx->nr_ctx) {
1711                         struct blk_mq_tag_set *set = q->tag_set;
1712
1713                         if (set->tags[i]) {
1714                                 blk_mq_free_rq_map(set, set->tags[i], i);
1715                                 set->tags[i] = NULL;
1716                                 hctx->tags = NULL;
1717                         }
1718                         continue;
1719                 }
1720
1721                 /*
1722                  * Initialize batch round-robin counts.
1723                  */
1724                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1725                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1726         }
1727 }
1728
1729 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1730 {
1731         struct blk_mq_hw_ctx *hctx;
1732         struct request_queue *q;
1733         bool shared;
1734         int i;
1735
1736         if (set->tag_list.next == set->tag_list.prev)
1737                 shared = false;
1738         else
1739                 shared = true;
1740
1741         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1742                 blk_mq_freeze_queue(q);
1743
1744                 queue_for_each_hw_ctx(q, hctx, i) {
1745                         if (shared)
1746                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1747                         else
1748                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1749                 }
1750                 blk_mq_unfreeze_queue(q);
1751         }
1752 }
1753
1754 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1755 {
1756         struct blk_mq_tag_set *set = q->tag_set;
1757
1758         blk_mq_freeze_queue(q);
1759
1760         mutex_lock(&set->tag_list_lock);
1761         list_del_init(&q->tag_set_list);
1762         blk_mq_update_tag_set_depth(set);
1763         mutex_unlock(&set->tag_list_lock);
1764
1765         blk_mq_unfreeze_queue(q);
1766 }
1767
1768 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1769                                      struct request_queue *q)
1770 {
1771         q->tag_set = set;
1772
1773         mutex_lock(&set->tag_list_lock);
1774         list_add_tail(&q->tag_set_list, &set->tag_list);
1775         blk_mq_update_tag_set_depth(set);
1776         mutex_unlock(&set->tag_list_lock);
1777 }
1778
1779 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1780 {
1781         struct blk_mq_hw_ctx **hctxs;
1782         struct blk_mq_ctx *ctx;
1783         struct request_queue *q;
1784         unsigned int *map;
1785         int i;
1786
1787         ctx = alloc_percpu(struct blk_mq_ctx);
1788         if (!ctx)
1789                 return ERR_PTR(-ENOMEM);
1790
1791         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1792                         set->numa_node);
1793
1794         if (!hctxs)
1795                 goto err_percpu;
1796
1797         map = blk_mq_make_queue_map(set);
1798         if (!map)
1799                 goto err_map;
1800
1801         for (i = 0; i < set->nr_hw_queues; i++) {
1802                 int node = blk_mq_hw_queue_to_node(map, i);
1803
1804                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1805                                         GFP_KERNEL, node);
1806                 if (!hctxs[i])
1807                         goto err_hctxs;
1808
1809                 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1810                         goto err_hctxs;
1811
1812                 atomic_set(&hctxs[i]->nr_active, 0);
1813                 hctxs[i]->numa_node = node;
1814                 hctxs[i]->queue_num = i;
1815         }
1816
1817         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1818         if (!q)
1819                 goto err_hctxs;
1820
1821         if (percpu_counter_init(&q->mq_usage_counter, 0))
1822                 goto err_map;
1823
1824         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1825         blk_queue_rq_timeout(q, 30000);
1826
1827         q->nr_queues = nr_cpu_ids;
1828         q->nr_hw_queues = set->nr_hw_queues;
1829         q->mq_map = map;
1830
1831         q->queue_ctx = ctx;
1832         q->queue_hw_ctx = hctxs;
1833
1834         q->mq_ops = set->ops;
1835         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1836
1837         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1838                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1839
1840         q->sg_reserved_size = INT_MAX;
1841
1842         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1843         INIT_LIST_HEAD(&q->requeue_list);
1844         spin_lock_init(&q->requeue_lock);
1845
1846         if (q->nr_hw_queues > 1)
1847                 blk_queue_make_request(q, blk_mq_make_request);
1848         else
1849                 blk_queue_make_request(q, blk_sq_make_request);
1850
1851         blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
1852         if (set->timeout)
1853                 blk_queue_rq_timeout(q, set->timeout);
1854
1855         /*
1856          * Do this after blk_queue_make_request() overrides it...
1857          */
1858         q->nr_requests = set->queue_depth;
1859
1860         if (set->ops->complete)
1861                 blk_queue_softirq_done(q, set->ops->complete);
1862
1863         blk_mq_init_flush(q);
1864         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1865
1866         q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1867                                 set->cmd_size, cache_line_size()),
1868                                 GFP_KERNEL);
1869         if (!q->flush_rq)
1870                 goto err_hw;
1871
1872         if (blk_mq_init_hw_queues(q, set))
1873                 goto err_flush_rq;
1874
1875         mutex_lock(&all_q_mutex);
1876         list_add_tail(&q->all_q_node, &all_q_list);
1877         mutex_unlock(&all_q_mutex);
1878
1879         blk_mq_add_queue_tag_set(set, q);
1880
1881         blk_mq_map_swqueue(q);
1882
1883         return q;
1884
1885 err_flush_rq:
1886         kfree(q->flush_rq);
1887 err_hw:
1888         blk_cleanup_queue(q);
1889 err_hctxs:
1890         kfree(map);
1891         for (i = 0; i < set->nr_hw_queues; i++) {
1892                 if (!hctxs[i])
1893                         break;
1894                 free_cpumask_var(hctxs[i]->cpumask);
1895                 kfree(hctxs[i]);
1896         }
1897 err_map:
1898         kfree(hctxs);
1899 err_percpu:
1900         free_percpu(ctx);
1901         return ERR_PTR(-ENOMEM);
1902 }
1903 EXPORT_SYMBOL(blk_mq_init_queue);
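/*
 * Minimal usage sketch (assumed driver code, not part of this file): with a
 * tag set already allocated via blk_mq_alloc_tag_set() below, a driver
 * obtains its request queue and checks the ERR_PTR return:
 *
 *	foo->queue = blk_mq_init_queue(&foo->tag_set);
 *	if (IS_ERR(foo->queue)) {
 *		ret = PTR_ERR(foo->queue);
 *		blk_mq_free_tag_set(&foo->tag_set);
 *		return ret;
 *	}
 *	foo->queue->queuedata = foo;
 */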
1904
1905 void blk_mq_free_queue(struct request_queue *q)
1906 {
1907         struct blk_mq_tag_set   *set = q->tag_set;
1908
1909         blk_mq_del_queue_tag_set(q);
1910
1911         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1912         blk_mq_free_hw_queues(q, set);
1913
1914         percpu_counter_destroy(&q->mq_usage_counter);
1915
1916         free_percpu(q->queue_ctx);
1917         kfree(q->queue_hw_ctx);
1918         kfree(q->mq_map);
1919
1920         q->queue_ctx = NULL;
1921         q->queue_hw_ctx = NULL;
1922         q->mq_map = NULL;
1923
1924         mutex_lock(&all_q_mutex);
1925         list_del_init(&q->all_q_node);
1926         mutex_unlock(&all_q_mutex);
1927 }
1928
1929 /* Basically redo blk_mq_init_queue() with the queue frozen */
1930 static void blk_mq_queue_reinit(struct request_queue *q)
1931 {
1932         blk_mq_freeze_queue(q);
1933
1934         blk_mq_sysfs_unregister(q);
1935
1936         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1937
1938         /*
1939          * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1940          * we should change hctx->numa_node according to the new topology (this
1941          * involves freeing and re-allocating memory; is it worth doing?)
1942          */
1943
1944         blk_mq_map_swqueue(q);
1945
1946         blk_mq_sysfs_register(q);
1947
1948         blk_mq_unfreeze_queue(q);
1949 }
1950
1951 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1952                                       unsigned long action, void *hcpu)
1953 {
1954         struct request_queue *q;
1955
1956         /*
1957          * Before new mappings are established, a hot-added CPU might already
1958          * start handling requests. This doesn't break anything, since we map
1959          * offline CPUs to the first hardware queue. We will re-init the queue
1960          * below to get optimal settings.
1961          */
1962         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1963             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1964                 return NOTIFY_OK;
1965
1966         mutex_lock(&all_q_mutex);
1967         list_for_each_entry(q, &all_q_list, all_q_node)
1968                 blk_mq_queue_reinit(q);
1969         mutex_unlock(&all_q_mutex);
1970         return NOTIFY_OK;
1971 }
1972
1973 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1974 {
1975         int i;
1976
1977         if (!set->nr_hw_queues)
1978                 return -EINVAL;
1979         if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
1980                 return -EINVAL;
1981         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1982                 return -EINVAL;
1983
1984         if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
1985                 return -EINVAL;
1986
1988         set->tags = kmalloc_node(set->nr_hw_queues *
1989                                  sizeof(struct blk_mq_tags *),
1990                                  GFP_KERNEL, set->numa_node);
1991         if (!set->tags)
1992                 goto out;
1993
1994         for (i = 0; i < set->nr_hw_queues; i++) {
1995                 set->tags[i] = blk_mq_init_rq_map(set, i);
1996                 if (!set->tags[i])
1997                         goto out_unwind;
1998         }
1999
2000         mutex_init(&set->tag_list_lock);
2001         INIT_LIST_HEAD(&set->tag_list);
2002
2003         return 0;
2004
2005 out_unwind:
2006         while (--i >= 0)
2007                 blk_mq_free_rq_map(set, set->tags[i], i);
2008 out:
2009         return -ENOMEM;
2010 }
2011 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
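/*
 * Illustrative sketch (hypothetical foo_* names) of a tag set that passes the
 * validation above: at minimum the ops must provide queue_rq and map_queue
 * (the generic blk_mq_map_queue helper is assumed here), and queue_depth must
 * cover reserved_tags plus BLK_MQ_TAG_MIN:
 *
 *	static struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 *
 *	memset(&foo->tag_set, 0, sizeof(foo->tag_set));
 *	foo->tag_set.ops		= &foo_mq_ops;
 *	foo->tag_set.nr_hw_queues	= 1;
 *	foo->tag_set.queue_depth	= 64;
 *	foo->tag_set.reserved_tags	= 0;
 *	foo->tag_set.cmd_size		= sizeof(struct foo_cmd);
 *	foo->tag_set.numa_node		= NUMA_NO_NODE;
 *	foo->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&foo->tag_set);
 *	if (ret)
 *		return ret;
 */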
2012
2013 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2014 {
2015         int i;
2016
2017         for (i = 0; i < set->nr_hw_queues; i++) {
2018                 if (set->tags[i])
2019                         blk_mq_free_rq_map(set, set->tags[i], i);
2020         }
2021
2022         kfree(set->tags);
2023 }
2024 EXPORT_SYMBOL(blk_mq_free_tag_set);
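/*
 * Teardown sketch (assumed driver code): a queue created from this tag set
 * must be cleaned up before the set itself is freed, mirroring the setup
 * order:
 *
 *	blk_cleanup_queue(foo->queue);
 *	blk_mq_free_tag_set(&foo->tag_set);
 */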
2025
2026 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2027 {
2028         struct blk_mq_tag_set *set = q->tag_set;
2029         struct blk_mq_hw_ctx *hctx;
2030         int i, ret;
2031
2032         if (!set || nr > set->queue_depth)
2033                 return -EINVAL;
2034
2035         ret = 0;
2036         queue_for_each_hw_ctx(q, hctx, i) {
2037                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2038                 if (ret)
2039                         break;
2040         }
2041
2042         if (!ret)
2043                 q->nr_requests = nr;
2044
2045         return ret;
2046 }
2047
2048 void blk_mq_disable_hotplug(void)
2049 {
2050         mutex_lock(&all_q_mutex);
2051 }
2052
2053 void blk_mq_enable_hotplug(void)
2054 {
2055         mutex_unlock(&all_q_mutex);
2056 }
2057
2058 static int __init blk_mq_init(void)
2059 {
2060         blk_mq_cpu_init();
2061
2062         /* Must be called after percpu_counter_hotcpu_callback() */
2063         hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2064
2065         return 0;
2066 }
2067 subsys_initcall(blk_mq_init);