null_blk: Fix error path in module initialization
drivers/block/null_blk.c
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
        struct list_head list;
        struct llist_node ll_list;
        struct call_single_data csd;
        struct request *rq;
        struct bio *bio;
        unsigned int tag;
        struct nullb_queue *nq;
        struct hrtimer timer;
};

struct nullb_queue {
        unsigned long *tag_map;
        wait_queue_head_t wait;
        unsigned int queue_depth;

        struct nullb_cmd *cmds;
};

struct nullb {
        struct list_head list;
        unsigned int index;
        struct request_queue *q;
        struct gendisk *disk;
        struct blk_mq_tag_set tag_set;
        struct hrtimer timer;
        unsigned int queue_depth;
        spinlock_t lock;

        struct nullb_queue *queues;
        unsigned int nr_queues;
        char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;

enum {
        NULL_IRQ_NONE           = 0,
        NULL_IRQ_SOFTIRQ        = 1,
        NULL_IRQ_TIMER          = 2,
};

enum {
        NULL_Q_BIO              = 0,
        NULL_Q_RQ               = 1,
        NULL_Q_MQ               = 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
        int ret, new_val;

        ret = kstrtoint(str, 10, &new_val);
        if (ret)
                return -EINVAL;

        if (new_val < min || new_val > max)
                return -EINVAL;

        *val = new_val;
        return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
        .set    = null_set_queue_mode,
        .get    = param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
        return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
                                        NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
        .set    = null_set_irqmode,
        .get    = param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long completion_nsec = 10000;
module_param(completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
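
/*
 * Example invocation (illustrative values, not part of this commit):
 * loading the module with four 8GB multiqueue devices that complete
 * requests from an hrtimer after 50us:
 *
 *   modprobe null_blk queue_mode=2 nr_devices=4 gb=8 irqmode=2 \
 *            completion_nsec=50000
 */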

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
        clear_bit_unlock(tag, nq->tag_map);

        if (waitqueue_active(&nq->wait))
                wake_up(&nq->wait);
}

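/*
 * Find a free tag by scanning the bitmap for a zero bit and claiming it
 * with test_and_set_bit_lock(); if another caller wins the race, the
 * test-and-set fails and the scan is retried. Returns -1U when the queue
 * is saturated.
 */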
static unsigned int get_tag(struct nullb_queue *nq)
{
        unsigned int tag;

        do {
                tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
                if (tag >= nq->queue_depth)
                        return -1U;
        } while (test_and_set_bit_lock(tag, nq->tag_map));

        return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
        put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        unsigned int tag;

        tag = get_tag(nq);
        if (tag != -1U) {
                cmd = &nq->cmds[tag];
                cmd->tag = tag;
                cmd->nq = nq;
                if (irqmode == NULL_IRQ_TIMER) {
                        hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
                                     HRTIMER_MODE_REL);
                        cmd->timer.function = null_cmd_timer_expired;
                }
                return cmd;
        }

        return NULL;
}

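/*
 * Blocking variant: if no tag is free and can_wait is set, sleep on the
 * queue's waitqueue until put_tag() wakes us, using the standard
 * prepare_to_wait()/io_schedule()/finish_wait() pattern.
 */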
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
        struct nullb_cmd *cmd;
        DEFINE_WAIT(wait);

        cmd = __alloc_cmd(nq);
        if (cmd || !can_wait)
                return cmd;

        do {
                prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
                cmd = __alloc_cmd(nq);
                if (cmd)
                        break;

                io_schedule();
        } while (1);

        finish_wait(&nq->wait, &wait);
        return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
        struct request_queue *q = NULL;

        switch (queue_mode) {
        case NULL_Q_MQ:
                blk_mq_end_request(cmd->rq, 0);
                return;
        case NULL_Q_RQ:
                INIT_LIST_HEAD(&cmd->rq->queuelist);
                blk_end_request_all(cmd->rq, 0);
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
                goto free_cmd;
        }

        if (cmd->rq)
                q = cmd->rq->q;

        /* Restart queue if needed, as we are freeing a tag */
        if (q && !q->mq_ops && blk_queue_stopped(q)) {
                unsigned long flags;

                spin_lock_irqsave(q->queue_lock, flags);
                if (blk_queue_stopped(q))
                        blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
free_cmd:
        free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
        end_cmd(container_of(timer, struct nullb_cmd, timer));

        return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
        ktime_t kt = ktime_set(0, completion_nsec);

        hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
        if (queue_mode == NULL_Q_MQ)
                end_cmd(blk_mq_rq_to_pdu(rq));
        else
                end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
        /* Complete IO by inline, softirq or timer */
        switch (irqmode) {
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode) {
                case NULL_Q_MQ:
                        blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
                        break;
                case NULL_Q_BIO:
                        /*
                         * XXX: no proper submitting cpu information available.
                         */
                        end_cmd(cmd);
                        break;
                }
                break;
        case NULL_IRQ_NONE:
                end_cmd(cmd);
                break;
        case NULL_IRQ_TIMER:
                null_cmd_end_timer(cmd);
                break;
        }
}

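/*
 * The divisor below is ceil(nr_cpu_ids / nr_queues), so CPUs map to queues
 * in contiguous chunks. For example, with nr_cpu_ids == 8 and nr_queues == 3,
 * the chunk size is 3 and CPUs 0-2, 3-5 and 6-7 map to queues 0, 1 and 2
 * respectively.
 */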
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
        int index = 0;

        if (nullb->nr_queues != 1)
                index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

        return &nullb->queues[index];
}

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 1);
        cmd->bio = bio;

        null_handle_cmd(cmd);
        return BLK_QC_T_NONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
        struct nullb_cmd *cmd;

        cmd = alloc_cmd(nq, 0);
        if (cmd) {
                cmd->rq = req;
                req->special = cmd;
                return BLKPREP_OK;
        }
        blk_stop_queue(q);

        return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                struct nullb_cmd *cmd = rq->special;

                spin_unlock_irq(q->queue_lock);
                null_handle_cmd(cmd);
                spin_lock_irq(q->queue_lock);
        }
}

static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
{
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        if (irqmode == NULL_IRQ_TIMER) {
                hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                cmd->timer.function = null_cmd_timer_expired;
        }
        cmd->rq = bd->rq;
        cmd->nq = hctx->driver_data;

        blk_mq_start_request(bd->rq);

        null_handle_cmd(cmd);
        return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
        BUG_ON(!nullb);
        BUG_ON(!nq);

        init_waitqueue_head(&nq->wait);
        nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int index)
{
        struct nullb *nullb = data;
        struct nullb_queue *nq = &nullb->queues[index];

        hctx->driver_data = nq;
        null_init_queue(nullb, nq);
        nullb->nr_queues++;

        return 0;
}

static struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
        .map_queue      = blk_mq_map_queue,
        .init_hctx      = null_init_hctx,
        .complete       = null_softirq_done_fn,
};

static void cleanup_queue(struct nullb_queue *nq)
{
        kfree(nq->tag_map);
        kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
        int i;

        for (i = 0; i < nullb->nr_queues; i++)
                cleanup_queue(&nullb->queues[i]);

        kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
        list_del_init(&nullb->list);

        if (use_lightnvm)
                nvm_unregister(nullb->disk_name);
        else
                del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
        if (!use_lightnvm)
                put_disk(nullb->disk);
        cleanup_queues(nullb);
        kfree(nullb);
}

#ifdef CONFIG_NVM

static void null_lnvm_end_io(struct request *rq, int error)
{
        struct nvm_rq *rqd = rq->end_io_data;
        struct nvm_dev *dev = rqd->dev;

        dev->mt->end_io(rqd, error);

        blk_put_request(rq);
}

static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        struct request_queue *q = dev->q;
        struct request *rq;
        struct bio *bio = rqd->bio;

        rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
        if (IS_ERR(rq))
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->__sector = bio->bi_iter.bi_sector;
        rq->ioprio = bio_prio(bio);

        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;

        rq->end_io_data = rqd;

        blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

        return 0;
}

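/*
 * Report a fixed geometry derived from the gb and bs parameters. As a
 * worked example with the LightNVM-mandated bs=4096 and the default gb=250:
 * 250GB / 4096 = 65536000 pages, / 256 pages per block = 256000 blocks;
 * 256000 / 65536 rounds up to num_lun = 4, giving 64000 blocks per LUN,
 * and 4 LUNs * 64000 blocks * 256 pages * 4096 bytes = 250GB again.
 */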
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
        sector_t size = gb * 1024 * 1024 * 1024ULL;
        sector_t blksize;
        struct nvm_id_group *grp;

        id->ver_id = 0x1;
        id->vmnt = 0;
        id->cgrps = 1;
        id->cap = 0x3;
        id->dom = 0x1;

        id->ppaf.blk_offset = 0;
        id->ppaf.blk_len = 16;
        id->ppaf.pg_offset = 16;
        id->ppaf.pg_len = 16;
        id->ppaf.sect_offset = 32;
        id->ppaf.sect_len = 8;
        id->ppaf.pln_offset = 40;
        id->ppaf.pln_len = 8;
        id->ppaf.lun_offset = 48;
        id->ppaf.lun_len = 8;
        id->ppaf.ch_offset = 56;
        id->ppaf.ch_len = 8;

        do_div(size, bs); /* convert size to pages */
        do_div(size, 256); /* convert size to blocks, 256 pages per block */
        grp = &id->groups[0];
        grp->mtype = 0;
        grp->fmtype = 0;
        grp->num_ch = 1;
        grp->num_pg = 256;
        blksize = size;
        do_div(size, (1 << 16));
        grp->num_lun = size + 1;
        do_div(blksize, grp->num_lun);
        grp->num_blk = blksize;
        grp->num_pln = 1;

        grp->fpg_sz = bs;
        grp->csecs = bs;
        grp->trdt = 25000;
        grp->trdm = 25000;
        grp->tprt = 500000;
        grp->tprm = 500000;
        grp->tbet = 1500000;
        grp->tbem = 1500000;
        grp->mpos = 0x010101; /* single plane rwe */
        grp->cpar = hw_queue_depth;

        return 0;
}

static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
{
        mempool_t *virtmem_pool;

        virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
        if (!virtmem_pool) {
                pr_err("null_blk: Unable to create virtual memory pool\n");
                return NULL;
        }

        return virtmem_pool;
}

static void null_lnvm_destroy_dma_pool(void *pool)
{
        mempool_destroy(pool);
}

static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
                                gfp_t mem_flags, dma_addr_t *dma_handler)
{
        return mempool_alloc(pool, mem_flags);
}

static void null_lnvm_dev_dma_free(void *pool, void *entry,
                                                        dma_addr_t dma_handler)
{
        mempool_free(entry, pool);
}

static struct nvm_dev_ops null_lnvm_dev_ops = {
        .identity               = null_lnvm_id,
        .submit_io              = null_lnvm_submit_io,

        .create_dma_pool        = null_lnvm_create_dma_pool,
        .destroy_dma_pool       = null_lnvm_destroy_dma_pool,
        .dev_dma_alloc          = null_lnvm_dev_dma_alloc,
        .dev_dma_free           = null_lnvm_dev_dma_free,

        /* Simulate nvme protocol restriction */
        .max_phys_sect          = 64,
};
#else
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
        return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
        .owner =        THIS_MODULE,
        .open =         null_open,
        .release =      null_release,
};

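/*
 * Per-queue command and tag-bitmap allocation. The bitmap holds one bit
 * per command; e.g. with the default hw_queue_depth of 64 and a 64-bit
 * BITS_PER_LONG, ALIGN(64, 64) / 64 = 1, so a single unsigned long backs
 * the map.
 */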
static int setup_commands(struct nullb_queue *nq)
{
        struct nullb_cmd *cmd;
        int i, tag_size;

        nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
        if (!nq->cmds)
                return -ENOMEM;

        tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
        nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
        if (!nq->tag_map) {
                kfree(nq->cmds);
                return -ENOMEM;
        }

        for (i = 0; i < nq->queue_depth; i++) {
                cmd = &nq->cmds[i];
                INIT_LIST_HEAD(&cmd->list);
                cmd->ll_list.next = NULL;
                cmd->tag = -1U;
        }

        return 0;
}

static int setup_queues(struct nullb *nullb)
{
        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
                                                                GFP_KERNEL);
        if (!nullb->queues)
                return -ENOMEM;

        nullb->nr_queues = 0;
        nullb->queue_depth = hw_queue_depth;

        return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
        struct nullb_queue *nq;
        int i, ret = 0;

        for (i = 0; i < submit_queues; i++) {
                nq = &nullb->queues[i];

                null_init_queue(nullb, nq);

                ret = setup_commands(nq);
                if (ret)
                        return ret;
                nullb->nr_queues++;
        }
        return 0;
}

static int null_add_dev(void)
{
        struct gendisk *disk;
        struct nullb *nullb;
        sector_t size;
        int rv;

        nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
        if (!nullb) {
                rv = -ENOMEM;
                goto out;
        }

        spin_lock_init(&nullb->lock);

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
                submit_queues = nr_online_nodes;

        rv = setup_queues(nullb);
        if (rv)
                goto out_free_nullb;

        if (queue_mode == NULL_Q_MQ) {
                nullb->tag_set.ops = &null_mq_ops;
                nullb->tag_set.nr_hw_queues = submit_queues;
                nullb->tag_set.queue_depth = hw_queue_depth;
                nullb->tag_set.numa_node = home_node;
                nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
                nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
                nullb->tag_set.driver_data = nullb;

                rv = blk_mq_alloc_tag_set(&nullb->tag_set);
                if (rv)
                        goto out_cleanup_queues;

                nullb->q = blk_mq_init_queue(&nullb->tag_set);
                if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_make_request(nullb->q, null_queue_bio);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        } else {
                nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
                }
                blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
                rv = init_driver_queues(nullb);
                if (rv)
                        goto out_cleanup_blk_queue;
        }

        nullb->q->queuedata = nullb;
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
        nullb->index = nullb_indexes++;
        mutex_unlock(&lock);

        blk_queue_logical_block_size(nullb->q, bs);
        blk_queue_physical_block_size(nullb->q, bs);

        sprintf(nullb->disk_name, "nullb%d", nullb->index);

        if (use_lightnvm) {
                rv = nvm_register(nullb->q, nullb->disk_name,
                                                        &null_lnvm_dev_ops);
                if (rv)
                        goto out_cleanup_blk_queue;
                goto done;
        }

        disk = nullb->disk = alloc_disk_node(1, home_node);
        if (!disk) {
                rv = -ENOMEM;
                goto out_cleanup_lightnvm;
        }
        size = gb * 1024 * 1024 * 1024ULL;
        set_capacity(disk, size >> 9);

        disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
        disk->major             = null_major;
        disk->first_minor       = nullb->index;
        disk->fops              = &null_fops;
        disk->private_data      = nullb;
        disk->queue             = nullb->q;
        strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

        add_disk(disk);
done:
        return 0;

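/*
 * Error labels unwind in the reverse order of setup: undo a LightNVM
 * registration if one was made, release the request queue, free the
 * blk-mq tag set if one was allocated, tear down the driver queues and
 * finally free the nullb itself.
 */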
out_cleanup_lightnvm:
        if (use_lightnvm)
                nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
out_cleanup_tags:
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
        cleanup_queues(nullb);
out_free_nullb:
        kfree(nullb);
out:
        return rv;
}

static int __init null_init(void)
{
        int ret = 0;
        unsigned int i;
        struct nullb *nullb;

        if (bs > PAGE_SIZE) {
                pr_warn("null_blk: invalid block size\n");
                pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
                bs = PAGE_SIZE;
        }

        if (use_lightnvm && bs != 4096) {
                pr_warn("null_blk: LightNVM only supports 4k block size\n");
                pr_warn("null_blk: defaulting block size to 4k\n");
                bs = 4096;
        }

        if (use_lightnvm && queue_mode != NULL_Q_MQ) {
                pr_warn("null_blk: LightNVM only supported for blk-mq\n");
                pr_warn("null_blk: defaulting queue mode to blk-mq\n");
                queue_mode = NULL_Q_MQ;
        }

        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.\n",
                                                        nr_online_nodes);
                        submit_queues = nr_online_nodes;
                }
        } else if (submit_queues > nr_cpu_ids)
                submit_queues = nr_cpu_ids;
        else if (!submit_queues)
                submit_queues = 1;

        mutex_init(&lock);

        null_major = register_blkdev(0, "nullb");
        if (null_major < 0)
                return null_major;

        if (use_lightnvm) {
                ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
                                                                0, 0, NULL);
                if (!ppa_cache) {
                        pr_err("null_blk: unable to create ppa cache\n");
                        ret = -ENOMEM;
                        goto err_ppa;
                }
        }

        for (i = 0; i < nr_devices; i++) {
                ret = null_add_dev();
                if (ret)
                        goto err_dev;
        }

        pr_info("null_blk: module loaded\n");
        return 0;

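/*
 * On failure, tear down in reverse: remove any devices that were added,
 * destroy the ppa cache (kmem_cache_destroy() tolerates NULL, so this is
 * safe even when use_lightnvm was not set) and drop the blkdev major.
 */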
err_dev:
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        kmem_cache_destroy(ppa_cache);
err_ppa:
        unregister_blkdev(null_major, "nullb");
        return ret;
}

static void __exit null_exit(void)
{
        struct nullb *nullb;

        unregister_blkdev(null_major, "nullb");

        mutex_lock(&lock);
        while (!list_empty(&nullb_list)) {
                nullb = list_entry(nullb_list.next, struct nullb, list);
                null_del_dev(nullb);
        }
        mutex_unlock(&lock);

        kmem_cache_destroy(ppa_cache);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");