nvme: set queue limits for the admin queue
author    Christoph Hellwig <hch@lst.de>
          Wed, 2 Mar 2016 17:07:11 +0000 (18:07 +0100)
committer Jens Axboe <axboe@fb.com>
          Thu, 3 Mar 2016 21:42:50 +0000 (14:42 -0700)
Factor out a helper to set all the device-specific queue limits and apply
them to the admin queue in addition to the I/O queues.  Without this the
command size on the admin queue is arbitrarily low, and the other missing
limits are just minefields waiting for victims.
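
As a worked example, assuming a 4 KiB controller page (ctrl->page_size >> 9
== 8 sectors per page) and max_hw_sectors == 1024, the helper computes
1024 / 8 + 1 == 129 max segments: one PRP entry per controller page of the
largest transfer, plus one because the first page may be unaligned.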

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/nvme/host/core.c

index 470d4f3..cfee6ac 100644
@@ -840,6 +840,21 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
        return ret;
 }
 
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+               struct request_queue *q)
+{
+       if (ctrl->max_hw_sectors) {
+               blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+               blk_queue_max_segments(q,
+                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
+       }
+       if (ctrl->stripe_size)
+               blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+       if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+               blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+       blk_queue_virt_boundary(q, ctrl->page_size - 1);
+}
+
 /*
  * Initialize the cached copies of the Identify data and various controller
  * register in our nvme_ctrl structure.  This should be called as soon as
@@ -897,6 +912,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                }
        }
 
+       nvme_set_queue_limits(ctrl, ctrl->admin_q);
+
        kfree(id);
        return 0;
 }
@@ -1147,17 +1164,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        ns->disk = disk;
        ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
 
+
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       if (ctrl->max_hw_sectors) {
-               blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
-               blk_queue_max_segments(ns->queue,
-                       (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
-       }
-       if (ctrl->stripe_size)
-               blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
-       if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-               blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
-       blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+       nvme_set_queue_limits(ctrl, ns->queue);
 
        disk->major = nvme_major;
        disk->first_minor = 0;
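
For readers less familiar with the block-layer setters used here, below is
the new helper once more with explanatory comments.  This is an annotated
sketch, not part of the patch; the rationale in the comments is inferred from
the NVMe PRP rules and the surrounding driver code, and blk_queue_flush() with
REQ_FLUSH/REQ_FUA reflects the block API of this era.

	/*
	 * Annotated sketch of the helper added above; comments only,
	 * behavior identical to the hunk in drivers/nvme/host/core.c.
	 */
	static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
			struct request_queue *q)
	{
		if (ctrl->max_hw_sectors) {
			/*
			 * Largest transfer the controller accepts, derived
			 * from the Identify Controller MDTS field.
			 */
			blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
			/*
			 * One PRP entry per controller page of the largest
			 * transfer, plus one extra because the first page
			 * of a transfer may be unaligned.
			 */
			blk_queue_max_segments(q,
				(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
		}
		if (ctrl->stripe_size)
			/*
			 * Split I/O that would otherwise straddle a stripe
			 * boundary on striped controllers.
			 */
			blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
		if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
			/*
			 * A volatile write cache is reported, so advertise
			 * flush and FUA support to the block layer.
			 */
			blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
		/*
		 * Enforce the NVMe PRP alignment rule: every scatter/gather
		 * element other than the first must start on a controller
		 * page boundary.
		 */
		blk_queue_virt_boundary(q, ctrl->page_size - 1);
	}

With this in place, nvme_init_identify() applies the same limits to
ctrl->admin_q, so admin commands are no longer capped at the block layer's
arbitrary defaults.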