drivers: use req op accessor
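The conversion below stops testing request->cmd_flags bits such as REQ_DISCARD directly and instead asks for the operation through the req_op() accessor. As a rough illustration of the pattern, here is a minimal, self-contained sketch; the SKETCH_* names and the mask layout are hypothetical and are not the kernel's actual macro:

/*
 * Minimal sketch of the accessor pattern. Only the idea matches the kernel
 * change: callers ask one helper which operation a request carries instead
 * of open-coding cmd_flags bit tests.
 */
enum sketch_req_op {
	SKETCH_REQ_OP_READ,
	SKETCH_REQ_OP_WRITE,
	SKETCH_REQ_OP_DISCARD,
	SKETCH_REQ_OP_FLUSH,
};

struct sketch_request {
	unsigned int cmd_flags;			/* operation stored in the low bits */
};

#define SKETCH_REQ_OP_MASK	0xffu		/* hypothetical mask */

static inline enum sketch_req_op sketch_req_op(const struct sketch_request *rq)
{
	return (enum sketch_req_op)(rq->cmd_flags & SKETCH_REQ_OP_MASK);
}

A caller then writes checks like sketch_req_op(rq) == SKETCH_REQ_OP_DISCARD, which mirrors the nvme_setup_cmd() change in the second hunk below.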
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2de248b..089b8b8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                        break;
                }
                break;
+       case NVME_CTRL_DEAD:
+               switch (old_state) {
+               case NVME_CTRL_DELETING:
+                       changed = true;
+                       /* FALLTHRU */
+               default:
+                       break;
+               }
+               break;
        default:
                break;
        }
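For context, the hunk above adds NVME_CTRL_DEAD to the controller state machine and allows it to be entered only from NVME_CTRL_DELETING. A simplified sketch of the validation pattern, not the driver's exact function, looks like this:

/*
 * Simplified sketch of the transition check. Each new state whitelists the
 * old states it may be entered from; the stored state only changes when the
 * requested transition is allowed.
 */
enum sketch_ctrl_state { SKETCH_LIVE, SKETCH_DELETING, SKETCH_DEAD };

static int sketch_change_state(enum sketch_ctrl_state *state,
			       enum sketch_ctrl_state new_state)
{
	int changed = 0;

	switch (new_state) {
	case SKETCH_DEAD:
		/* Only a controller already being deleted may be marked dead. */
		changed = (*state == SKETCH_DELETING);
		break;
	default:
		/* Other target states have their own whitelists (omitted). */
		break;
	}

	if (changed)
		*state = new_state;
	return changed;
}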
@@ -283,7 +292,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                memcpy(cmd, req->cmd, sizeof(*cmd));
        else if (req->cmd_flags & REQ_FLUSH)
                nvme_setup_flush(ns, cmd);
-       else if (req->cmd_flags & REQ_DISCARD)
+       else if (req_op(req) == REQ_OP_DISCARD)
                ret = nvme_setup_discard(ns, req, cmd);
        else
                nvme_setup_rw(ns, req, cmd);
@@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
        switch (ns->pi_type) {
        case NVME_NS_DPS_PI_TYPE3:
                integrity.profile = &t10_pi_type3_crc;
+               integrity.tag_size = sizeof(u16) + sizeof(u32);
+               integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
                break;
        case NVME_NS_DPS_PI_TYPE1:
        case NVME_NS_DPS_PI_TYPE2:
                integrity.profile = &t10_pi_type1_crc;
+               integrity.tag_size = sizeof(u16);
+               integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
                break;
        default:
                integrity.profile = NULL;
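The two tag_size values above follow from the 8-byte T10 protection-information tuple: Type 3 leaves both the application tag and the reference tag to the host (2 + 4 bytes of usable tag space), while Type 1 and Type 2 reserve the reference tag for the LBA check and expose only the 2-byte application tag. BLK_INTEGRITY_DEVICE_CAPABLE tells the block layer the device itself can generate and verify the protection data. As background only, a sketch of the tuple layout using user-space fixed-width types (not code from this file):

#include <stdint.h>

/*
 * One T10 DIF tuple per protection interval (big-endian on the wire). The
 * tag_size values registered above count the host-usable tag bytes.
 */
struct sketch_t10_pi_tuple {
	uint16_t guard_tag;	/* CRC16 over the data interval */
	uint16_t app_tag;	/* opaque application tag, host-owned */
	uint32_t ref_tag;	/* LBA check for Type 1/2, host-owned for Type 3 */
};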
@@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                return ctrl->ops->reset_ctrl(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
+       case NVME_IOCTL_RESCAN:
+               nvme_queue_scan(ctrl);
+               return 0;
        default:
                return -ENOTTY;
        }
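The new ioctl lets user space trigger a namespace rescan without resetting the controller. A minimal usage sketch, assuming the controller character device is /dev/nvme0 and that NVME_IOCTL_RESCAN is exported by <linux/nvme_ioctl.h> as this change intends:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	int fd = open("/dev/nvme0", O_RDWR);	/* controller char device, name assumed */

	if (fd < 0) {
		perror("open /dev/nvme0");
		return 1;
	}
	if (ioctl(fd, NVME_IOCTL_RESCAN) < 0)	/* takes no argument */
		perror("NVME_IOCTL_RESCAN");
	close(fd);
	return 0;
}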
@@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
 }
 static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
 
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+                               struct device_attribute *attr, const char *buf,
+                               size_t count)
+{
+       struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+       nvme_queue_scan(ctrl);
+       return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
 static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
                                                                char *buf)
 {
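The sysfs attribute added above offers the same rescan trigger without an ioctl: any write to rescan_controller queues a scan and the written value is ignored. A small user-space sketch, assuming the controller shows up as nvme0 under /sys/class/nvme:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/nvme/nvme0/rescan_controller", "w");

	if (!f) {
		perror("rescan_controller");
		return 1;
	}
	fputs("1\n", f);	/* value is ignored; the write itself queues the scan */
	return fclose(f) ? 1 : 0;
}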
@@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid);
 
 static struct attribute *nvme_dev_attrs[] = {
        &dev_attr_reset_controller.attr,
+       &dev_attr_rescan_controller.attr,
        &dev_attr_model.attr,
        &dev_attr_serial.attr,
        &dev_attr_firmware_rev.attr,
@@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns, *next;
 
+       /*
+        * The dead state indicates the controller was not gracefully
+        * disconnected. In that case, we won't be able to flush any data while
+        * removing the namespaces' disks; fail all the queues now to avoid
+        * potentially having to clean up the failed sync later.
+        */
+       if (ctrl->state == NVME_CTRL_DEAD)
+               nvme_kill_queues(ctrl);
+
        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
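As the comment in the hunk explains, a controller that ends up NVME_CTRL_DEAD can no longer complete I/O, so the queues are failed before the disks are torn down. A hypothetical sketch of what that amounts to, not the exact nvme_kill_queues() body:

/*
 * Hypothetical sketch: mark every namespace's request queue dying so
 * outstanding and future I/O completes with an error instead of waiting on
 * a controller that will never answer.
 */
static void sketch_kill_ns_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_set_queue_dying(ns->queue);	/* error out all further I/O */
	mutex_unlock(&ctrl->namespaces_mutex);
}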