[SCSI] qla2xxx: Enhancements to support ISPFx00.
author Giridhar Malavali <giridhar.malavali@qlogic.com>
Thu, 28 Mar 2013 12:21:23 +0000 (08:21 -0400)
committer James Bottomley <JBottomley@Parallels.com>
Thu, 11 Apr 2013 22:42:04 +0000 (15:42 -0700)
[jejb: fix up checkpatch issues]
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: Armen Baloyan <armen.baloyan@qlogic.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
15 files changed:
drivers/scsi/qla2xxx/Makefile
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mr.c [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_mr.h [new file with mode: 0644]
drivers/scsi/qla2xxx/qla_os.c

index dce7d78..c37b244 100644
@@ -1,6 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
                qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o qla_target.o
+        qla_nx.o qla_mr.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
index b3db9dc..bf60c63 100644
@@ -888,7 +888,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
        struct qla_hw_data *ha = vha->hw;
        uint32_t sn;
 
-       if (IS_FWI2_CAPABLE(ha)) {
+       if (IS_QLAFX00(vha->hw)) {
+               return snprintf(buf, PAGE_SIZE, "%s\n",
+                   vha->hw->mr.serial_num);
+       } else if (IS_FWI2_CAPABLE(ha)) {
                qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
                return snprintf(buf, PAGE_SIZE, "%s\n", buf);
        }
@@ -912,6 +915,11 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
+
+       if (IS_QLAFX00(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "%s\n",
+                   vha->hw->mr.hw_version);
+
        return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
            ha->product_id[0], ha->product_id[1], ha->product_id[2],
            ha->product_id[3]);
@@ -922,6 +930,11 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (IS_QLAFX00(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "%s\n",
+                   vha->hw->mr.product_name);
+
        return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
 }
 
@@ -1304,6 +1317,12 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        int rval = QLA_FUNCTION_FAILED;
        uint16_t state[5];
+       uint32_t pstate;
+
+       if (IS_QLAFX00(vha->hw)) {
+               pstate = qlafx00_fw_state_show(dev, attr, buf);
+               return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
+       }
 
        if (qla2x00_reset_active(vha))
                ql_log(ql_log_warn, vha, 0x707c,
@@ -1454,6 +1473,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
                                        (shost_priv(shost)))->hw;
        u32 speed = FC_PORTSPEED_UNKNOWN;
 
+       if (IS_QLAFX00(ha)) {
+               qlafx00_get_host_speed(shost);
+               return;
+       }
+
        switch (ha->link_data_rate) {
        case PORT_SPEED_1GB:
                speed = FC_PORTSPEED_1GBIT;
@@ -1637,6 +1661,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
 {
        scsi_qla_host_t *vha = shost_priv(shost);
 
+       if (IS_QLAFX00(vha->hw))
+               return 0;
+
        qla2x00_loop_reset(vha);
        return 0;
 }
@@ -1655,6 +1682,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
        pfc_host_stat = &vha->fc_host_stat;
        memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
 
+       if (IS_QLAFX00(vha->hw))
+               goto done;
+
        if (test_bit(UNLOADING, &vha->dpc_flags))
                goto done;
 
@@ -2087,6 +2117,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
                    FC_PORTSPEED_1GBIT;
        else if (IS_QLA23XX(ha))
                speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+       else if (IS_QLAFX00(ha))
+               speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+                   FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
        else
                speed = FC_PORTSPEED_1GBIT;
        fc_host_supported_speeds(vha->host) = speed;
index ad54099..7d2f021 100644
@@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct fc_bsg_job *bsg_job = sp->u.bsg_job;
        struct qla_hw_data *ha = vha->hw;
+       struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
 
-       dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-           bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+       if (sp->type == SRB_FXIOCB_BCMD) {
+               piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+                   &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
-       dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-           bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+               if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+                       dma_unmap_sg(&ha->pdev->dev,
+                           bsg_job->request_payload.sg_list,
+                           bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+               if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+                       dma_unmap_sg(&ha->pdev->dev,
+                           bsg_job->reply_payload.sg_list,
+                           bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+       } else {
+               dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+                   bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+               dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+                   bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+       }
 
        if (sp->type == SRB_CT_CMD ||
+           sp->type == SRB_FXIOCB_BCMD ||
            sp->type == SRB_ELS_CMD_HST)
                kfree(sp->fcport);
        qla2x00_rel_sp(vha, sp);
@@ -1882,6 +1899,128 @@ done:
        return 0;
 }
 
+static int
+qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       struct qla_hw_data *ha = vha->hw;
+       int rval = (DRIVER_ERROR << 16);
+       struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+       srb_t *sp;
+       int req_sg_cnt = 0, rsp_sg_cnt = 0;
+       struct fc_port *fcport;
+       char  *type = "FC_BSG_HST_FX_MGMT";
+
+       /* Copy the IOCB specific information */
+       piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+           &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+       /* Dump the vendor information */
+       ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
+           (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+
+       if (!vha->flags.online) {
+               ql_log(ql_log_warn, vha, 0x70d0,
+                   "Host is not online.\n");
+               rval = -EIO;
+               goto done;
+       }
+
+       if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+               req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+                   bsg_job->request_payload.sg_list,
+                   bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+               if (!req_sg_cnt) {
+                       ql_log(ql_log_warn, vha, 0x70c7,
+                           "dma_map_sg return %d for request\n", req_sg_cnt);
+                       rval = -ENOMEM;
+                       goto done;
+               }
+       }
+
+       if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+               rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+                   bsg_job->reply_payload.sg_list,
+                   bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+               if (!rsp_sg_cnt) {
+                       ql_log(ql_log_warn, vha, 0x70c8,
+                           "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+                       rval = -ENOMEM;
+                       goto done_unmap_req_sg;
+               }
+       }
+
+       ql_dbg(ql_dbg_user, vha, 0x70c9,
+           "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+           "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+           req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+
+       /* Allocate a dummy fcport structure, since the functions preparing
+        * the IOCB and mailbox command retrieve port-specific information
+        * from the fcport structure. For host-based ELS commands there will
+        * be no fcport structure allocated.
+        */
+       fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+       if (!fcport) {
+               ql_log(ql_log_warn, vha, 0x70ca,
+                   "Failed to allocate fcport.\n");
+               rval = -ENOMEM;
+               goto done_unmap_rsp_sg;
+       }
+
+       /* Alloc SRB structure */
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp) {
+               ql_log(ql_log_warn, vha, 0x70cb,
+                   "qla2x00_get_sp failed.\n");
+               rval = -ENOMEM;
+               goto done_free_fcport;
+       }
+
+       /* Initialize all required fields of fcport */
+       fcport->vha = vha;
+       fcport->loop_id = piocb_rqst->dataword;
+
+       sp->type = SRB_FXIOCB_BCMD;
+       sp->name = "bsg_fx_mgmt";
+       sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+       sp->u.bsg_job = bsg_job;
+       sp->free = qla2x00_bsg_sp_free;
+       sp->done = qla2x00_bsg_job_done;
+
+       ql_dbg(ql_dbg_user, vha, 0x70cc,
+           "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
+           type, piocb_rqst->func_type, fcport->loop_id);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x70cd,
+                   "qla2x00_start_sp failed=%d.\n", rval);
+               mempool_free(sp, ha->srb_mempool);
+               rval = -EIO;
+               goto done_free_fcport;
+       }
+       return rval;
+
+done_free_fcport:
+       kfree(fcport);
+
+done_unmap_rsp_sg:
+       if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+               dma_unmap_sg(&ha->pdev->dev,
+                   bsg_job->reply_payload.sg_list,
+                   bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+       if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+               dma_unmap_sg(&ha->pdev->dev,
+                   bsg_job->request_payload.sg_list,
+                   bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+done:
+       return rval;
+}
+
 static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
@@ -1928,6 +2067,8 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
        case QL_VND_DIAG_IO_CMD:
                return qla24xx_process_bidir_cmd(bsg_job);
 
+       case QL_VND_FX00_MGMT_CMD:
+               return qlafx00_mgmt_cmd(bsg_job);
        default:
                return -ENOSYS;
        }
@@ -2007,7 +2148,8 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
                        sp = req->outstanding_cmds[cnt];
                        if (sp) {
                                if (((sp->type == SRB_CT_CMD) ||
-                                       (sp->type == SRB_ELS_CMD_HST))
+                                       (sp->type == SRB_ELS_CMD_HST) ||
+                                       (sp->type == SRB_FXIOCB_BCMD))
                                        && (sp->u.bsg_job == bsg_job)) {
                                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                                        if (ha->isp_ops->abort_command(sp)) {
index e9f6b9b..04f7703 100644
@@ -22,6 +22,7 @@
 #define QL_VND_DIAG_IO_CMD     0x0A
 #define QL_VND_WRITE_I2C       0x10
 #define QL_VND_READ_I2C                0x11
+#define QL_VND_FX00_MGMT_CMD   0x12
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK                  0
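
For context, the new QL_VND_FX00_MGMT_CMD sub-command travels over the standard FC BSG interface: user space places the vendor code in vendor_cmd[0] and the management request (struct qla_mt_iocb_rqst_fx00, defined in the new qla_mr.h) starting at vendor_cmd[1], which is exactly where qlafx00_mgmt_cmd() above picks it up. A minimal user-space sketch follows; the bsg node name, header paths and sg_io_v4 plumbing are assumptions about the generic FC BSG interface, not something this patch defines.

/* Hypothetical user-space caller for QL_VND_FX00_MGMT_CMD (illustrative only,
 * not part of this patch). Assumes the generic FC BSG interface: a
 * /dev/bsg/fc_hostN node driven through the sg_io_v4 SG_IO ioctl. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>              /* struct sg_io_v4, BSG_PROTOCOL_SCSI */
#include <scsi/sg.h>                /* SG_IO */
#include <scsi/scsi_bsg_fc.h>       /* struct fc_bsg_request, FC_BSG_HST_VENDOR */

#define QL_VND_FX00_MGMT_CMD    0x12    /* from qla_bsg.h above */

static int send_fx00_mgmt(const char *bsg_node, const void *mgmt_req,
                          size_t mgmt_len, void *resp, size_t resp_len)
{
        /* vendor_cmd[0] is the sub-command; vendor_cmd[1..] carries the
         * struct qla_mt_iocb_rqst_fx00 payload the driver expects. */
        uint8_t req_buf[sizeof(struct fc_bsg_request) + 4 + 256];
        struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)req_buf;
        struct fc_bsg_reply bsg_reply;
        struct sg_io_v4 io;
        int fd, ret;

        if (mgmt_len > 256)
                return -1;

        memset(req_buf, 0, sizeof(req_buf));
        memset(&bsg_reply, 0, sizeof(bsg_reply));
        bsg_req->msgcode = FC_BSG_HST_VENDOR;
        bsg_req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_FX00_MGMT_CMD;
        memcpy(&bsg_req->rqst_data.h_vendor.vendor_cmd[1], mgmt_req, mgmt_len);

        memset(&io, 0, sizeof(io));
        io.guard = 'Q';
        io.protocol = BSG_PROTOCOL_SCSI;
        io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
        io.request = (uintptr_t)bsg_req;
        io.request_len = sizeof(req_buf);
        io.response = (uintptr_t)&bsg_reply;
        io.max_response_len = sizeof(bsg_reply);
        io.din_xferp = (uintptr_t)resp;         /* reply payload, if any */
        io.din_xfer_len = resp_len;

        fd = open(bsg_node, O_RDWR);            /* e.g. "/dev/bsg/fc_host0" */
        if (fd < 0)
                return -1;
        ret = ioctl(fd, SG_IO, &io);
        close(fd);
        return ret;
}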
index fbc305f..cfa2a20 100644
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0126       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x115b       | 0x111a-0x111b  |
- * |                              |                    | 0x112c-0x112e  |
- * |                              |                    | 0x113a         |
+ * | Module Init and Probe        |       0x014f       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x1179       | 0x111a-0x111b  |
  * |                              |                    | 0x1155-0x1158  |
- * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
+ * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3031       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3058       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
- * |                              |                    | 0x302d-0x302e  |
- * | DPC Thread                   |       0x401d       | 0x4002,0x4013  |
- * | Async Events                 |       0x5071       | 0x502b-0x502f  |
+ * |                              |                    | 0x303d-0x3041  |
+ * |                              |                    | 0x302d,0x3033  |
+ * |                              |                    | 0x3036,0x3038  |
+ * |                              |                    | 0x303a         |
+ * | DPC Thread                   |       0x4022       | 0x4002,0x4013  |
+ * | Async Events                 |       0x5081       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
+ * |                              |                    | 0x5040,0x5075  |
  * | Timer Routines               |       0x6011       |                |
- * | User Space Interactions      |       0x70c4       | 0x7018,0x702e, |
+ * | User Space Interactions      |       0x70dd       | 0x7018,0x702e, |
  * |                              |                    | 0x7020,0x7024, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
- * |                              |                    | 0x708c,        |
+ * |                              |                    | 0x707b,0x708c, |
  * |                              |                    | 0x70a5,0x70a6, |
  * |                              |                    | 0x70a8,0x70ab, |
- * |                              |                    | 0x70ad-0x70ae  |
+ * |                              |                    | 0x70ad-0x70ae, |
+ * |                              |                    | 0x70d1-0x70da  |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |               |
index 65c5ff7..e52722d 100644
 
 #define MAX_CMDSZ      16              /* SCSI maximum CDB size. */
 #include "qla_fw.h"
-
 /*
  * Timeout timer counts in seconds
  */
 #define RESPONSE_ENTRY_CNT_2300                512     /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ          128     /* Number of response entries.*/
 #define ATIO_ENTRY_CNT_24XX            4096    /* Number of ATIO entries. */
+#define RESPONSE_ENTRY_CNT_FX00                256     /* Number of response entries.*/
 
 struct req_que;
 
@@ -284,6 +284,7 @@ struct sd_dif_tuple {
 struct srb_cmd {
        struct scsi_cmnd *cmd;          /* Linux SCSI command pkt */
        uint32_t request_sense_length;
+       uint32_t fw_sense_length;
        uint8_t *request_sense_ptr;
        void *ctx;
 };
@@ -321,7 +322,39 @@ struct srb_iocb {
                        uint32_t flags;
                        uint32_t lun;
                        uint32_t data;
+                       struct completion comp;
+                       uint32_t comp_status;
                } tmf;
+               struct {
+#define SRB_FXDISC_REQ_DMA_VALID       BIT_0
+#define SRB_FXDISC_RESP_DMA_VALID      BIT_1
+#define SRB_FXDISC_REQ_DWRD_VALID      BIT_2
+#define SRB_FXDISC_RSP_DWRD_VALID      BIT_3
+#define FXDISC_TIMEOUT 20
+                       uint8_t flags;
+                       uint32_t req_len;
+                       uint32_t rsp_len;
+                       void *req_addr;
+                       void *rsp_addr;
+                       dma_addr_t req_dma_handle;
+                       dma_addr_t rsp_dma_handle;
+                       uint32_t adapter_id;
+                       uint32_t adapter_id_hi;
+                       uint32_t req_func_type;
+                       uint32_t req_data;
+                       uint32_t req_data_extra;
+                       uint32_t result;
+                       uint32_t seq_number;
+                       uint32_t fw_flags;
+                       struct completion fxiocb_comp;
+                       uint32_t reserved_0;
+                       uint8_t reserved_1;
+               } fxiocb;
+               struct {
+                       uint32_t cmd_hndl;
+                       uint32_t comp_status;
+                       struct completion comp;
+               } abt;
        } u;
 
        struct timer_list timer;
@@ -338,6 +371,10 @@ struct srb_iocb {
 #define SRB_TM_CMD     7
 #define SRB_SCSI_CMD   8
 #define SRB_BIDI_CMD   9
+#define SRB_FXIOCB_DCMD        10
+#define SRB_FXIOCB_BCMD        11
+#define SRB_ABT_CMD    12
+
 
 typedef struct srb {
        atomic_t ref_count;
@@ -368,6 +405,10 @@ typedef struct srb {
        (sp->u.scmd.request_sense_ptr)
 #define SET_CMD_SENSE_PTR(sp, ptr) \
        (sp->u.scmd.request_sense_ptr = ptr)
+#define GET_FW_SENSE_LEN(sp) \
+       (sp->u.scmd.fw_sense_length)
+#define SET_FW_SENSE_LEN(sp, len) \
+       (sp->u.scmd.fw_sense_length = len)
 
 struct msg_echo_lb {
        dma_addr_t send_dma;
@@ -542,11 +583,74 @@ struct device_reg_25xxmq {
        uint32_t atio_q_out;
 };
 
+
+struct device_reg_fx00 {
+       uint32_t mailbox0;              /* 00 */
+       uint32_t mailbox1;              /* 04 */
+       uint32_t mailbox2;              /* 08 */
+       uint32_t mailbox3;              /* 0C */
+       uint32_t mailbox4;              /* 10 */
+       uint32_t mailbox5;              /* 14 */
+       uint32_t mailbox6;              /* 18 */
+       uint32_t mailbox7;              /* 1C */
+       uint32_t mailbox8;              /* 20 */
+       uint32_t mailbox9;              /* 24 */
+       uint32_t mailbox10;             /* 28 */
+       uint32_t mailbox11;
+       uint32_t mailbox12;
+       uint32_t mailbox13;
+       uint32_t mailbox14;
+       uint32_t mailbox15;
+       uint32_t mailbox16;
+       uint32_t mailbox17;
+       uint32_t mailbox18;
+       uint32_t mailbox19;
+       uint32_t mailbox20;
+       uint32_t mailbox21;
+       uint32_t mailbox22;
+       uint32_t mailbox23;
+       uint32_t mailbox24;
+       uint32_t mailbox25;
+       uint32_t mailbox26;
+       uint32_t mailbox27;
+       uint32_t mailbox28;
+       uint32_t mailbox29;
+       uint32_t mailbox30;
+       uint32_t mailbox31;
+       uint32_t aenmailbox0;
+       uint32_t aenmailbox1;
+       uint32_t aenmailbox2;
+       uint32_t aenmailbox3;
+       uint32_t aenmailbox4;
+       uint32_t aenmailbox5;
+       uint32_t aenmailbox6;
+       uint32_t aenmailbox7;
+       /* Request Queue. */
+       uint32_t req_q_in;              /* A0 - Request Queue In-Pointer */
+       uint32_t req_q_out;             /* A4 - Request Queue Out-Pointer */
+       /* Response Queue. */
+       uint32_t rsp_q_in;              /* A8 - Response Queue In-Pointer */
+       uint32_t rsp_q_out;             /* AC - Response Queue Out-Pointer */
+       /* Init values shadowed on FW Up Event */
+       uint32_t initval0;              /* B0 */
+       uint32_t initval1;              /* B4 */
+       uint32_t initval2;              /* B8 */
+       uint32_t initval3;              /* BC */
+       uint32_t initval4;              /* C0 */
+       uint32_t initval5;              /* C4 */
+       uint32_t initval6;              /* C8 */
+       uint32_t initval7;              /* CC */
+       uint32_t fwheartbeat;           /* D0 */
+};
+
+
+
 typedef union {
                struct device_reg_2xxx isp;
                struct device_reg_24xx isp24;
                struct device_reg_25xxmq isp25mq;
                struct device_reg_82xx isp82;
+               struct device_reg_fx00 ispfx00;
 } device_reg_t;
 
 #define ISP_REQ_Q_IN(ha, reg) \
@@ -602,6 +706,20 @@ typedef struct {
 #define IOCTL_CMD      BIT_2
 } mbx_cmd_t;
 
+struct mbx_cmd_32 {
+       uint32_t        out_mb;         /* outbound from driver */
+       uint32_t        in_mb;                  /* Incoming from RISC */
+       uint32_t        mb[MAILBOX_REGISTER_COUNT];
+       long            buf_size;
+       void            *bufp;
+       uint32_t        tov;
+       uint8_t         flags;
+#define MBX_DMA_IN     BIT_0
+#define        MBX_DMA_OUT     BIT_1
+#define IOCTL_CMD      BIT_2
+};
+
+
 #define        MBX_TOV_SECONDS 30
 
 /*
@@ -677,6 +795,15 @@ typedef struct {
 #define MBA_BYPASS_NOTIFICATION        0x8043  /* Auto bypass notification. */
 #define MBA_DISCARD_RND_FRAME  0x8048  /* discard RND frame due to error. */
 #define MBA_REJECTED_FCP_CMD   0x8049  /* rejected FCP_CMD. */
+#define MBA_FW_NOT_STARTED     0x8050  /* Firmware not started */
+#define MBA_FW_STARTING                0x8051  /* Firmware starting */
+#define MBA_FW_RESTART_CMPLT   0x8060  /* Firmware restart complete */
+#define MBA_INIT_REQUIRED      0x8061  /* Initialization required */
+#define MBA_SHUTDOWN_REQUESTED 0x8062  /* Shutdown Requested */
+#define MBA_FW_INIT_FAILURE    0x8401  /* Firmware initialization failure */
+#define MBA_MIRROR_LUN_CHANGE  0x8402  /* Mirror LUN State Change
+                                          Notification */
+#define MBA_FW_POLL_STATE      0x8600  /* Firmware in poll diagnostic state */
 
 /* 83XX FCoE specific */
 #define MBA_IDC_AEN            0x8200  /* FCoE: NIC Core state change AEN */
@@ -797,6 +924,12 @@ typedef struct {
 #define MBC_SEND_LFA_COMMAND           0x7D    /* Send Loop Fabric Address */
 #define MBC_LUN_RESET                  0x7E    /* Send LUN reset */
 
+/*
+ * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
+ * should be defined with MBC_MR_*
+ */
+#define MBC_MR_DRV_SHUTDOWN            0x6A
+
 /*
  * ISP24xx mailbox commands
  */
@@ -1058,6 +1191,30 @@ typedef struct {
        uint8_t  reserved_3[26];
 } init_cb_t;
 
+
+struct init_cb_fx {
+       uint16_t        version;
+       uint16_t        reserved_1[13];
+       uint16_t        request_q_outpointer;
+       uint16_t        response_q_inpointer;
+       uint16_t        reserved_2[2];
+       uint16_t        response_q_length;
+       uint16_t        request_q_length;
+       uint16_t        reserved_3[2];
+       uint32_t        request_q_address[2];
+       uint32_t        response_q_address[2];
+       uint16_t        reserved_4[4];
+       uint8_t         response_q_msivec;
+       uint8_t         reserved_5[19];
+       uint16_t        interrupt_delay_timer;
+       uint16_t        reserved_6;
+       uint32_t        fwoptions1;
+       uint32_t        fwoptions2;
+       uint32_t        fwoptions3;
+       uint8_t         reserved_7[24];
+};
+
+
 /*
  * Get Link Status mailbox command return buffer.
  */
@@ -1831,6 +1988,9 @@ typedef struct fc_port {
        uint16_t loop_id;
        uint16_t old_loop_id;
 
+       uint16_t tgt_id;
+       uint16_t old_tgt_id;
+
        uint8_t fcp_prio;
 
        uint8_t fabric_port_name[WWN_SIZE];
@@ -1848,8 +2008,15 @@ typedef struct fc_port {
 
        uint8_t fc4_type;
        uint8_t scan_state;
+
+       unsigned long last_queue_full;
+       unsigned long last_ramp_up;
+
+       uint16_t port_id;
 } fc_port_t;
 
+#include "qla_mr.h"
+
 /*
  * Fibre channel port/lun states.
  */
@@ -2391,6 +2558,7 @@ struct isp_operations {
        int (*start_scsi) (srb_t *);
        int (*abort_isp) (struct scsi_qla_host *);
        int (*iospace_config)(struct qla_hw_data*);
+       int (*initialize_adapter)(struct scsi_qla_host *);
 };
 
 /* MSI-X Support *************************************************************/
@@ -2429,6 +2597,7 @@ enum qla_work_type {
        QLA_EVT_ASYNC_ADISC,
        QLA_EVT_ASYNC_ADISC_DONE,
        QLA_EVT_UEVENT,
+       QLA_EVT_AENFX,
 };
 
 
@@ -2456,7 +2625,15 @@ struct qla_work_evt {
                        u32 code;
 #define QLA_UEVENT_CODE_FW_DUMP        0
                } uevent;
-       } u;
+               struct {
+                       uint32_t        evtcode;
+                       uint32_t        mbx[8];
+                       uint32_t        count;
+               } aenfx;
+               struct {
+                       srb_t *sp;
+               } iosb;
+        } u;
 };
 
 struct qla_chip_state_84xx {
@@ -2520,6 +2697,11 @@ struct rsp_que {
        struct req_que *req;
        srb_t *status_srb; /* status continuation entry */
        struct work_struct q_work;
+
+       dma_addr_t  dma_fx00;
+       response_t *ring_fx00;
+       uint16_t  length_fx00;
+       uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
 };
 
 /* Request queue data structure */
@@ -2544,6 +2726,11 @@ struct req_que {
        uint16_t num_outstanding_cmds;
 #define        MAX_Q_DEPTH             32
        int max_q_depth;
+
+       dma_addr_t  dma_fx00;
+       request_t *ring_fx00;
+       uint16_t  length_fx00;
+       uint8_t req_pkt[REQUEST_ENTRY_SIZE];
 };
 
 /* Place holder for FW buffer parameters */
@@ -2633,7 +2820,10 @@ struct qla_hw_data {
                uint32_t        isp82xx_no_md_cap:1;
                uint32_t        host_shutting_down:1;
                uint32_t        idc_compl_status:1;
-               /* 32 bits */
+
+               uint32_t        mr_reset_hdlr_active:1;
+               uint32_t        mr_intr_valid:1;
+               /* 34 bits */
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -2650,7 +2840,21 @@ struct qla_hw_data {
        resource_size_t pio_address;
 
 #define MIN_IOBASE_LEN          0x100
-/* Multi queue data structs */
+       dma_addr_t              bar0_hdl;
+
+       void __iomem *cregbase;
+       dma_addr_t              bar2_hdl;
+#define BAR0_LEN_FX00                  (1024 * 1024)
+#define BAR2_LEN_FX00                  (128 * 1024)
+
+       uint32_t                rqstq_intr_code;
+       uint32_t                mbx_intr_code;
+       uint32_t                req_que_len;
+       uint32_t                rsp_que_len;
+       uint32_t                req_que_off;
+       uint32_t                rsp_que_off;
+
+       /* Multi queue data structs */
        device_reg_t __iomem *mqiobase;
        device_reg_t __iomem *msixbase;
        uint16_t        msix_count;
@@ -2729,7 +2933,8 @@ struct qla_hw_data {
 #define DT_ISP8021                     BIT_14
 #define DT_ISP2031                     BIT_15
 #define DT_ISP8031                     BIT_16
-#define DT_ISP_LAST                    (DT_ISP8031 << 1)
+#define DT_ISPFX00                     BIT_17
+#define DT_ISP_LAST                    (DT_ISPFX00 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2757,6 +2962,7 @@ struct qla_hw_data {
 #define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
 #define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
 #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
+#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
                        IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2821,6 +3027,7 @@ struct qla_hw_data {
        uint16_t        r_a_tov;
        int             port_down_retry_count;
        uint8_t         mbx_count;
+       uint8_t         aen_mbx_count;
 
        uint32_t        login_retry_count;
        /* SNS command interfaces. */
@@ -2868,9 +3075,13 @@ struct qla_hw_data {
        void            *swl;
 
        /* These are used by mailbox operations. */
-       volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+       uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+       uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
+       uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
 
        mbx_cmd_t       *mcp;
+       struct mbx_cmd_32       *mcp32;
+
        unsigned long   mbx_cmd_flags;
 #define MBX_INTERRUPT          1
 #define MBX_INTR_WAIT          2
@@ -3014,6 +3225,7 @@ struct qla_hw_data {
        int             cur_vport_count;
 
        struct qla_chip_state_84xx *cs84xx;
+       struct qla_statistics qla_stats;
        struct isp_operations *isp_ops;
        struct workqueue_struct *wq;
        struct qlfc_fw fw_buf;
@@ -3080,6 +3292,8 @@ struct qla_hw_data {
        unsigned long   host_last_rampup_time;
        int             cfg_lun_q_depth;
 
+       struct mr_data_fx00 mr;
+
        struct qlt_hw_data tgt;
        uint16_t        thermal_support;
 #define THERMAL_SUPPORT_I2C BIT_0
@@ -3109,6 +3323,8 @@ typedef struct scsi_qla_host {
                uint32_t        process_response_queue  :1;
                uint32_t        difdix_supported:1;
                uint32_t        delete_progress:1;
+
+               uint32_t        fw_tgt_reported:1;
        } flags;
 
        atomic_t        loop_state;
@@ -3144,6 +3360,9 @@ typedef struct scsi_qla_host {
 #define SCR_PENDING            21      /* SCR in target mode */
 #define HOST_RAMP_DOWN_QUEUE_DEPTH     22
 #define HOST_RAMP_UP_QUEUE_DEPTH       23
+#define PORT_UPDATE_NEEDED     24
+#define FX00_RESET_RECOVERY    25
+#define FX00_TARGET_SCAN       26
 
        uint32_t        device_flags;
 #define SWITCH_FOUND           BIT_0
@@ -3234,6 +3453,10 @@ struct qla_tgt_vp_map {
         test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
         atomic_read(&ha->loop_state) == LOOP_DOWN)
 
+#define STATE_TRANSITION(ha) \
+               (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+                        test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+
 #define QLA_VHA_MARK_BUSY(__vha, __bail) do {               \
        atomic_inc(&__vha->vref_count);                      \
        mb();                                                \
index b310fa9..026bfde 100644
@@ -86,6 +86,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
 
 extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+extern int qla2x00_init_rings(scsi_qla_host_t *);
 
 /*
  * Global Data in qla_os.c source file.
@@ -134,7 +135,6 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
     fc_port_t *, uint16_t *);
-extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
@@ -158,6 +158,7 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
 extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
 extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
 extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -211,8 +212,6 @@ extern int qla24xx_start_scsi(srb_t *sp);
 int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
                                                uint16_t, uint16_t, uint8_t);
 extern int qla2x00_start_sp(srb_t *);
-extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
-extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
@@ -424,6 +423,12 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
 extern int qla2x00_get_data_rate(scsi_qla_host_t *);
 extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
+extern srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
+       void *);
+extern void
+qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
+       uint32_t);
 
 /*
  * Global Function Prototypes in qla_sup.c source file.
@@ -561,6 +566,42 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 
+/* qlafx00 related functions */
+extern int qlafx00_pci_config(struct scsi_qla_host *);
+extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
+extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_chip_diag(scsi_qla_host_t *);
+extern void qlafx00_config_rings(struct scsi_qla_host *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
+extern irqreturn_t qlafx00_intr_handler(int, void *);
+extern void qlafx00_enable_intrs(struct qla_hw_data *);
+extern void qlafx00_disable_intrs(struct qla_hw_data *);
+extern int qlafx00_abort_command(srb_t *);
+extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
+extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
+extern int qlafx00_start_scsi(srb_t *);
+extern int qlafx00_abort_isp(scsi_qla_host_t *);
+extern int qlafx00_iospace_config(struct qla_hw_data *);
+extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_fw_ready(scsi_qla_host_t *);
+extern int qlafx00_configure_devices(scsi_qla_host_t *);
+extern int qlafx00_reset_initialize(scsi_qla_host_t *);
+extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint8_t);
+extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern int qlafx00_post_aenfx_work(struct scsi_qla_host *,  uint32_t,
+                                  uint32_t *, int);
+extern uint32_t qlafx00_fw_state_show(struct device *,
+                                     struct device_attribute *, char *);
+extern void qlafx00_get_host_speed(struct Scsi_Host *);
+extern void qlafx00_init_response_q_entries(struct rsp_que *);
+
+extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
+extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
+extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
+extern void qlafx00_timer_routine(scsi_qla_host_t *);
+extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+
 /* qla82xx related functions */
 
 /* PCI related functions */
index 9b45525..d0ea8b9 100644
@@ -639,9 +639,14 @@ void
 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
 {
        struct qla_hw_data *ha = vha->hw;
-       sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
-           ha->fw_major_version, ha->fw_minor_version,
-           ha->fw_subminor_version, qla2x00_version_str);
+
+       if (IS_QLAFX00(ha))
+               sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
+                   ha->mr.fw_version, qla2x00_version_str);
+       else
+               sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+                   ha->fw_major_version, ha->fw_minor_version,
+                   ha->fw_subminor_version, qla2x00_version_str);
 }
 
 /**
@@ -923,7 +928,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
                    sns_cmd->p.gpn_data[9] != 0x02) {
                        ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
                            "GPN_ID failed, rejected request, gpn_rsp:\n");
-                       ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
+                       ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
                            sns_cmd->p.gpn_data, 16);
                        rval = QLA_FUNCTION_FAILED;
                } else {
@@ -1718,7 +1723,8 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
        int rval;
        struct qla_hw_data *ha = vha->hw;
 
-       if (IS_QLA2100(ha) || IS_QLA2200(ha))
+       if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
+           IS_QLAFX00(ha))
                return QLA_FUNCTION_FAILED;
 
        rval = qla2x00_mgmt_svr_login(vha);
index b592033..3565dfd 100644
@@ -25,7 +25,6 @@
 */
 static int qla2x00_isp_firmware(scsi_qla_host_t *);
 static int qla2x00_setup_chip(scsi_qla_host_t *);
-static int qla2x00_init_rings(scsi_qla_host_t *);
 static int qla2x00_fw_ready(scsi_qla_host_t *);
 static int qla2x00_configure_hba(scsi_qla_host_t *);
 static int qla2x00_configure_loop(scsi_qla_host_t *);
@@ -83,7 +82,9 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
 
        /* Firmware should use switch negotiated r_a_tov for timeout. */
        tmo = ha->r_a_tov / 10 * 2;
-       if (!IS_FWI2_CAPABLE(ha)) {
+       if (IS_QLAFX00(ha)) {
+               tmo = FX00_DEF_RATOV * 2;
+       } else if (!IS_FWI2_CAPABLE(ha)) {
                /*
                 * Except for earlier ISPs where the timeout is seeded from the
                 * initialization control block.
@@ -1977,7 +1978,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
  *
  * Returns 0 on success.
  */
-static int
+int
 qla2x00_init_rings(scsi_qla_host_t *vha)
 {
        int     rval;
@@ -2012,7 +2013,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                if (!rsp)
                        continue;
                /* Initialize response queue entries */
-               qla2x00_init_response_q_entries(rsp);
+               if (IS_QLAFX00(ha))
+                       qlafx00_init_response_q_entries(rsp);
+               else
+                       qla2x00_init_response_q_entries(rsp);
        }
 
        ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
@@ -2024,11 +2028,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+       ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
+
+       if (IS_QLAFX00(ha)) {
+               rval = qlafx00_init_firmware(vha, ha->init_cb_size);
+               goto next_check;
+       }
+
        /* Update any ISP specific firmware options before initialization. */
        ha->isp_ops->update_fw_options(vha);
 
-       ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
-
        if (ha->flags.npiv_supported) {
                if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
                        ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -2042,6 +2051,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
        }
 
        rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+next_check:
        if (rval) {
                ql_log(ql_log_fatal, vha, 0x00d2,
                    "Init Firmware **** FAILED ****.\n");
@@ -2069,6 +2079,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
        uint16_t        state[5];
        struct qla_hw_data *ha = vha->hw;
 
+       if (IS_QLAFX00(vha->hw))
+               return qlafx00_fw_ready(vha);
+
        rval = QLA_SUCCESS;
 
        /* 20 seconds for loop down. */
@@ -3134,6 +3147,12 @@ void
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
        fcport->vha = vha;
+
+       if (IS_QLAFX00(vha->hw)) {
+               qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+               qla2x00_reg_remote_port(vha, fcport);
+               return;
+       }
        fcport->login_retry = 0;
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
@@ -3894,15 +3913,24 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
                        /* Wait at most MAX_TARGET RSCNs for a stable link. */
                        wait_time = 256;
                        do {
-                               /* Issue a marker after FW becomes ready. */
-                               qla2x00_marker(vha, req, rsp, 0, 0,
-                                       MK_SYNC_ALL);
-                               vha->marker_needed = 0;
+                               if (!IS_QLAFX00(vha->hw)) {
+                                       /*
+                                        * Issue a marker after FW becomes
+                                        * ready.
+                                        */
+                                       qla2x00_marker(vha, req, rsp, 0, 0,
+                                               MK_SYNC_ALL);
+                                       vha->marker_needed = 0;
+                               }
 
                                /* Remap devices on Loop. */
                                clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 
-                               qla2x00_configure_loop(vha);
+                               if (IS_QLAFX00(vha->hw))
+                                       qlafx00_configure_devices(vha);
+                               else
+                                       qla2x00_configure_loop(vha);
+
                                wait_time--;
                        } while (!atomic_read(&vha->loop_down_timer) &&
                                !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
@@ -3968,9 +3996,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
                        if (fcport->drport &&
                            atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
                                spin_unlock_irqrestore(&ha->vport_slock, flags);
-
                                qla2x00_rport_del(fcport);
-
                                spin_lock_irqsave(&ha->vport_slock, flags);
                        }
                }
index 68e2c4a..98ab921 100644
@@ -5,6 +5,28 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+{
+       uint16_t iocbs;
+
+       iocbs = 1;
+       if (dsds > 1) {
+               iocbs += (dsds - 1) / 5;
+               if ((dsds - 1) % 5)
+                       iocbs++;
+       }
+       return iocbs;
+}
+
 /*
  * qla2x00_debounce_register
  *      Debounce register.
@@ -57,6 +79,17 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
        return fcp;
 }
 
+static inline void
+host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
+{
+       uint32_t *isrc = (uint32_t *) src;
+       uint32_t *odest = (uint32_t *) dst;
+       uint32_t iter = bsize >> 2;
+
+       for (; iter ; iter--)
+               *odest++ = cpu_to_le32(*isrc++);
+}
+
 static inline void
 qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
 {
@@ -213,12 +246,18 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
        sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
        add_timer(&sp->u.iocb_cmd.timer);
        sp->free = qla2x00_sp_free;
+       if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
+           (sp->type == SRB_FXIOCB_DCMD))
+               init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
 }
 
 static inline int
 qla2x00_gid_list_size(struct qla_hw_data *ha)
 {
-       return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+       if (IS_QLAFX00(ha))
+               return sizeof(uint32_t) * 32;
+       else
+               return sizeof(struct gid_list_info) * ha->max_fibre_devices;
 }
 
 static inline void
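
As a side note, the qla24xx_calc_iocbs() helper moved above counts one command IOCB plus one Continuation Type 1 IOCB for every five data-segment descriptors beyond the first, rounded up. A stand-alone sketch of the same arithmetic with a few example values (illustrative only, not driver code):

#include <assert.h>
#include <stdint.h>

/* Mirrors the qla24xx_calc_iocbs() arithmetic: one command IOCB for the
 * first DSD, plus one continuation IOCB per five additional DSDs. */
static uint16_t calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs = 1;

        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}

int main(void)
{
        assert(calc_iocbs(1) == 1);     /* fits in the command IOCB alone    */
        assert(calc_iocbs(6) == 2);     /* 5 extra DSDs -> 1 continuation    */
        assert(calc_iocbs(7) == 3);     /* 6 extra DSDs -> 2 continuations   */
        assert(calc_iocbs(12) == 4);    /* 11 extra DSDs -> 3 continuations  */
        return 0;
}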
index d263031..15e4080 100644
@@ -135,7 +135,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
        /* Load packet defaults. */
-       *((uint32_t *)(&cont_pkt->entry_type)) =
+       *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
+           __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);
 
        return (cont_pkt);
@@ -486,6 +487,10 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
                if (ha->mqenable || IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+               } else if (IS_QLAFX00(ha)) {
+                       WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
+                       RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+                       QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -514,11 +519,12 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        uint16_t lun, uint8_t type)
 {
        mrk_entry_t *mrk;
-       struct mrk_entry_24xx *mrk24;
+       struct mrk_entry_24xx *mrk24 = NULL;
+       struct mrk_entry_fx00 *mrkfx = NULL;
+
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-       mrk24 = NULL;
        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
@@ -531,7 +537,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
-               if (IS_FWI2_CAPABLE(ha)) {
+               if (IS_QLAFX00(ha)) {
+                       mrkfx = (struct mrk_entry_fx00 *) mrk;
+                       mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
+                       mrkfx->handle_hi = 0;
+                       mrkfx->tgt_id = cpu_to_le16(loop_id);
+                       mrkfx->lun[1] = LSB(lun);
+                       mrkfx->lun[2] = MSB(lun);
+                       host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
+               } else if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
@@ -589,28 +603,6 @@ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
        return QLA_SUCCESS;
 }
 
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of IOCB entries needed to store @dsds.
- */
-inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
-{
-       uint16_t iocbs;
-
-       iocbs = 1;
-       if (dsds > 1) {
-               iocbs += (dsds - 1) / 5;
-               if ((dsds - 1) % 5)
-                       iocbs++;
-       }
-       return iocbs;
-}
-
 static inline int
 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
@@ -1583,7 +1575,6 @@ queuing_error:
        return QLA_FUNCTION_FAILED;
 }
 
-
 /**
  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
  * @sp: command to send to the ISP
@@ -1852,6 +1843,8 @@ skip_cmd_array:
                        cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
                else if (IS_FWI2_CAPABLE(ha))
                        cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+               else if (IS_QLAFX00(ha))
+                       cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
                else
                        cnt = qla2x00_debounce_register(
                            ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -1869,8 +1862,13 @@ skip_cmd_array:
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
        memset(pkt, 0, REQUEST_ENTRY_SIZE);
-       pkt->entry_count = req_cnt;
-       pkt->handle = handle;
+       if (IS_QLAFX00(ha)) {
+               WRT_REG_BYTE(&pkt->entry_count, req_cnt);
+               WRT_REG_WORD(&pkt->handle, handle);
+       } else {
+               pkt->entry_count = req_cnt;
+               pkt->handle = handle;
+       }
 
 queuing_error:
        return pkt;
@@ -2625,7 +2623,16 @@ qla2x00_start_sp(srb_t *sp)
                    qla2x00_adisc_iocb(sp, pkt);
                break;
        case SRB_TM_CMD:
-               qla24xx_tm_iocb(sp, pkt);
+               IS_QLAFX00(ha) ?
+                   qlafx00_tm_iocb(sp, pkt) :
+                   qla24xx_tm_iocb(sp, pkt);
+               break;
+       case SRB_FXIOCB_DCMD:
+       case SRB_FXIOCB_BCMD:
+               qlafx00_fxdisc_iocb(sp, pkt);
+               break;
+       case SRB_ABT_CMD:
+               qlafx00_abort_iocb(sp, pkt);
                break;
        default:
                break;
index e9dbd74..4c7bd24 100644
@@ -16,8 +16,6 @@
 #include "qla_target.h"
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_process_completed_request(struct scsi_qla_host *,
-       struct req_que *, uint32_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
@@ -1065,9 +1063,9 @@ skip_rio:
  * @ha: SCSI driver HA context
  * @index: SRB index
  */
-static void
+void
 qla2x00_process_completed_request(struct scsi_qla_host *vha,
-                               struct req_que *req, uint32_t index)
+                                 struct req_que *req, uint32_t index)
 {
        srb_t *sp;
        struct qla_hw_data *ha = vha->hw;
@@ -1101,7 +1099,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
        }
 }
 
-static srb_t *
+srb_t *
 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
     struct req_que *req, void *iocb)
 {
@@ -1994,7 +1992,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                return;
        }
 
-       lscsi_status = scsi_status & STATUS_MASK;
+       lscsi_status = scsi_status & STATUS_MASK;
 
        fcport = sp->fcport;
 
@@ -2939,7 +2937,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 
        /* If possible, enable MSI-X. */
        if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-               !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+               !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
                goto skip_msi;
 
        if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2972,7 +2970,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 skip_msix:
 
        if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-           !IS_QLA8001(ha) && !IS_QLA82XX(ha))
+           !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
                goto skip_msi;
 
        ret = pci_enable_msi(ha->pdev);
@@ -2998,9 +2996,11 @@ skip_msi:
                    "Failed to reserve interrupt %d already in use.\n",
                    ha->pdev->irq);
                goto fail;
-       } else if (!ha->flags.msi_enabled)
+       } else if (!ha->flags.msi_enabled) {
                ql_dbg(ql_dbg_init, vha, 0x0125,
                    "INTa mode: Enabled.\n");
+               ha->flags.mr_intr_valid = 1;
+       }
 
 clear_risc_ints:
 
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
new file mode 100644 (file)
index 0000000..729b743
--- /dev/null
@@ -0,0 +1,3476 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/utsname.h>
+
+
+/* QLAFX00 specific Mailbox implementation functions */
+
+/*
+ * qlafx00_mailbox_command
+ *     Issue mailbox command and waits for completion.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ *     mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ *     0 : QLA_SUCCESS = cmd performed success
+ *     1 : QLA_FUNCTION_FAILED   (error encountered)
+ *     6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ *     Kernel context.
+ */
+static int
+qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
+
+{
+       int             rval;
+       unsigned long    flags = 0;
+       device_reg_t __iomem *reg;
+       uint8_t         abort_active;
+       uint8_t         io_lock_on;
+       uint16_t        command = 0;
+       uint32_t        *iptr;
+       uint32_t __iomem *optr;
+       uint32_t        cnt;
+       uint32_t        mboxes;
+       unsigned long   wait_time;
+       struct qla_hw_data *ha = vha->hw;
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+       if (ha->pdev->error_state > pci_channel_io_frozen) {
+               ql_log(ql_log_warn, vha, 0x115c,
+                   "error_state is greater than pci_channel_io_frozen, "
+                   "exiting.\n");
+               return QLA_FUNCTION_TIMEOUT;
+       }
+
+       if (vha->device_flags & DFLG_DEV_FAILED) {
+               ql_log(ql_log_warn, vha, 0x115f,
+                   "Device in failed state, exiting.\n");
+               return QLA_FUNCTION_TIMEOUT;
+       }
+
+       reg = ha->iobase;
+       io_lock_on = base_vha->flags.init_done;
+
+       rval = QLA_SUCCESS;
+       abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+       if (ha->flags.pci_channel_io_perm_failure) {
+               ql_log(ql_log_warn, vha, 0x1175,
+                   "Perm failure on EEH timeout MBX, exiting.\n");
+               return QLA_FUNCTION_TIMEOUT;
+       }
+
+       if (ha->flags.isp82xx_fw_hung) {
+               /* Setting Link-Down error */
+               mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+               ql_log(ql_log_warn, vha, 0x1176,
+                   "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+               rval = QLA_FUNCTION_FAILED;
+               goto premature_exit;
+       }
+
+       /*
+        * Wait for active mailbox commands to finish by waiting at most tov
+        * seconds. This is to serialize actual issuing of mailbox cmds during
+        * non ISP abort time.
+        */
+       if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+               /* Timeout occurred. Return error. */
+               ql_log(ql_log_warn, vha, 0x1177,
+                   "Cmd access timeout, cmd=0x%x, Exiting.\n",
+                   mcp->mb[0]);
+               return QLA_FUNCTION_TIMEOUT;
+       }
+
+       ha->flags.mbox_busy = 1;
+       /* Save mailbox command for debug */
+       ha->mcp32 = mcp;
+
+       ql_dbg(ql_dbg_mbx, vha, 0x1178,
+           "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Load mailbox registers. */
+       optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+
+       iptr = mcp->mb;
+       command = mcp->mb[0];
+       mboxes = mcp->out_mb;
+
+       for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+               if (mboxes & BIT_0)
+                       WRT_REG_DWORD(optr, *iptr);
+
+               mboxes >>= 1;
+               optr++;
+               iptr++;
+       }
+
+       /* Issue set host interrupt command to send cmd out. */
+       ha->flags.mbox_int = 0;
+       clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+       ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
+           (uint8_t *)mcp->mb, 16);
+       ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
+           ((uint8_t *)mcp->mb + 0x10), 16);
+       ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
+           ((uint8_t *)mcp->mb + 0x20), 8);
+
+       /* Unlock mbx registers and wait for interrupt */
+       ql_dbg(ql_dbg_mbx, vha, 0x1179,
+           "Going to unlock irq & waiting for interrupts. "
+           "jiffies=%lx.\n", jiffies);
+
+       /* Wait for mbx cmd completion until timeout */
+       if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+               set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+               QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+
+               clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+       } else {
+               ql_dbg(ql_dbg_mbx, vha, 0x112c,
+                   "Cmd=%x Polling Mode.\n", command);
+
+               QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+               wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+               while (!ha->flags.mbox_int) {
+                       if (time_after(jiffies, wait_time))
+                               break;
+
+                       /* Check for pending interrupts. */
+                       qla2x00_poll(ha->rsp_q_map[0]);
+
+                       if (!ha->flags.mbox_int &&
+                           !(IS_QLA2200(ha) &&
+                           command == MBC_LOAD_RISC_RAM_EXTENDED))
+                               usleep_range(10000, 11000);
+               } /* while */
+               ql_dbg(ql_dbg_mbx, vha, 0x112d,
+                   "Waited %d sec.\n",
+                   (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+       }
+
+       /* Check whether we timed out */
+       if (ha->flags.mbox_int) {
+               uint32_t *iptr2;
+
+               ql_dbg(ql_dbg_mbx, vha, 0x112e,
+                   "Cmd=%x completed.\n", command);
+
+               /* Got interrupt. Clear the flag. */
+               ha->flags.mbox_int = 0;
+               clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+               if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
+                       rval = QLA_FUNCTION_FAILED;
+
+               /* Load return mailbox registers. */
+               iptr2 = mcp->mb;
+               iptr = (uint32_t *)&ha->mailbox_out32[0];
+               mboxes = mcp->in_mb;
+               for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+                       if (mboxes & BIT_0)
+                               *iptr2 = *iptr;
+
+                       mboxes >>= 1;
+                       iptr2++;
+                       iptr++;
+               }
+       } else {
+               rval = QLA_FUNCTION_TIMEOUT;
+       }
+
+       ha->flags.mbox_busy = 0;
+
+       /* Clean up */
+       ha->mcp32 = NULL;
+
+       if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+               ql_dbg(ql_dbg_mbx, vha, 0x113a,
+                   "checking for additional resp interrupt.\n");
+
+               /* polling mode for non isp_abort commands. */
+               qla2x00_poll(ha->rsp_q_map[0]);
+       }
+
+       if (rval == QLA_FUNCTION_TIMEOUT &&
+           mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+               if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+                   ha->flags.eeh_busy) {
+                       /* not in dpc. schedule it for dpc to take over. */
+                       ql_dbg(ql_dbg_mbx, vha, 0x115d,
+                           "Timeout, schedule isp_abort_needed.\n");
+
+                       if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+                           !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+                           !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+                               ql_log(ql_log_info, base_vha, 0x115e,
+                                   "Mailbox cmd timeout occurred, cmd=0x%x, "
+                                   "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+                                   "abort.\n", command, mcp->mb[0],
+                                   ha->flags.eeh_busy);
+                               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
+                       }
+               } else if (!abort_active) {
+                       /* call abort directly since we are in the DPC thread */
+                       ql_dbg(ql_dbg_mbx, vha, 0x1160,
+                           "Timeout, calling abort_isp.\n");
+
+                       if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+                           !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+                           !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+                               ql_log(ql_log_info, base_vha, 0x1161,
+                                   "Mailbox cmd timeout occurred, cmd=0x%x, "
+                                   "mb[0]=0x%x. Scheduling ISP abort.\n",
+                                   command, mcp->mb[0]);
+
+                               set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+                               clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                               if (ha->isp_ops->abort_isp(vha)) {
+                                       /* Failed. retry later. */
+                                       set_bit(ISP_ABORT_NEEDED,
+                                           &vha->dpc_flags);
+                               }
+                               clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+                               ql_dbg(ql_dbg_mbx, vha, 0x1162,
+                                   "Finished abort_isp.\n");
+                       }
+               }
+       }
+
+premature_exit:
+       /* Allow next mbx cmd to come in. */
+       complete(&ha->mbx_cmd_comp);
+
+       if (rval) {
+               ql_log(ql_log_warn, base_vha, 0x1163,
+                   "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
+                   "mb[3]=%x, cmd=%x ****.\n",
+                   mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+       } else {
+               ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+/*
+ * qlafx00_driver_shutdown
+ *     Indicate a driver shutdown to firmware.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *
+ * Returns:
+ *     local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+static int
+qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
+{
+       int rval;
+       struct mbx_cmd_32 mc;
+       struct mbx_cmd_32 *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
+       mcp->out_mb = MBX_0;
+       mcp->in_mb = MBX_0;
+       if (tmo)
+               mcp->tov = tmo;
+       else
+               mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qlafx00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1167,
+                   "Failed=%x.\n", rval);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+/*
+ * qlafx00_get_firmware_state
+ *     Get adapter firmware state.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     TARGET_QUEUE_LOCK must be released.
+ *     ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *     qlafx00 local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+static int
+qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
+{
+       int rval;
+       struct mbx_cmd_32 mc;
+       struct mbx_cmd_32 *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+       mcp->out_mb = MBX_0;
+       mcp->in_mb = MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qlafx00_mailbox_command(vha, mcp);
+
+       /* Return firmware states. */
+       states[0] = mcp->mb[1];
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x116a,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
+                   "Done %s.\n", __func__);
+       }
+       return rval;
+}
+
+/*
+ * qlafx00_init_firmware
+ *     Initialize adapter firmware.
+ *
+ * Input:
+ *     ha = adapter block pointer.
+ *     dptr = Initialization control block pointer.
+ *     size = size of initialization control block.
+ *     TARGET_QUEUE_LOCK must be released.
+ *     ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *     qlafx00 local function return status code.
+ *
+ * Context:
+ *     Kernel context.
+ */
+int
+qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+       int rval;
+       struct mbx_cmd_32 mc;
+       struct mbx_cmd_32 *mcp = &mc;
+       struct qla_hw_data *ha = vha->hw;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+       mcp->mb[1] = 0;
+       mcp->mb[2] = MSD(ha->init_cb_dma);
+       mcp->mb[3] = LSD(ha->init_cb_dma);
+
+       mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->buf_size = size;
+       mcp->flags = MBX_DMA_OUT;
+       mcp->tov = MBX_TOV_SECONDS;
+       rval = qlafx00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x116d,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
+                   "Done %s.\n", __func__);
+       }
+       return rval;
+}
+
+/*
+ * qlafx00_mbx_reg_test
+ */
+static int
+qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+       int rval;
+       struct mbx_cmd_32 mc;
+       struct mbx_cmd_32 *mcp = &mc;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+       mcp->mb[1] = 0xAAAA;
+       mcp->mb[2] = 0x5555;
+       mcp->mb[3] = 0xAA55;
+       mcp->mb[4] = 0x55AA;
+       mcp->mb[5] = 0xA5A5;
+       mcp->mb[6] = 0x5A5A;
+       mcp->mb[7] = 0x2525;
+       mcp->mb[8] = 0xBBBB;
+       mcp->mb[9] = 0x6666;
+       mcp->mb[10] = 0xBB66;
+       mcp->mb[11] = 0x66BB;
+       mcp->mb[12] = 0xB6B6;
+       mcp->mb[13] = 0x6B6B;
+       mcp->mb[14] = 0x3636;
+       mcp->mb[15] = 0xCCCC;
+
+       mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+                       MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+                       MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->buf_size = 0;
+       mcp->flags = MBX_DMA_OUT;
+       mcp->tov = MBX_TOV_SECONDS;
+       rval = qlafx00_mailbox_command(vha, mcp);
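+       /*
+        * Verify the returned mailbox contents against the test pattern
+        * written above; any mismatch fails the register test.
+        */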
+       if (rval == QLA_SUCCESS) {
+               if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
+                   mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
+                       rval = QLA_FUNCTION_FAILED;
+               if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
+                   mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
+                       rval = QLA_FUNCTION_FAILED;
+               if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
+                   mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
+                       rval = QLA_FUNCTION_FAILED;
+               if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
+                   mcp->mb[31] != 0xCCCC)
+                       rval = QLA_FUNCTION_FAILED;
+       }
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1170,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
+                   "Done %s.\n", __func__);
+       }
+       return rval;
+}
+
+/**
+ * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_pci_config(scsi_qla_host_t *vha)
+{
+       uint16_t w;
+       struct qla_hw_data *ha = vha->hw;
+
+       pci_set_master(ha->pdev);
+       pci_try_set_mwi(ha->pdev);
+
+       pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+       w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+       w &= ~PCI_COMMAND_INTX_DISABLE;
+       pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+       /* PCIe -- adjust Maximum Read Request Size (2048). */
+       if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+               pcie_set_readrq(ha->pdev, 2048);
+
+       ha->chip_revision = ha->pdev->revision;
+
+       return QLA_SUCCESS;
+}
+
+/**
+ * qlafx00_soc_cpu_reset() - Perform warm reset of the iSA SOC (all CPU cores reset).
+ * @ha: HA context
+ */
+static inline void
+qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
+{
+       unsigned long flags = 0;
+       struct qla_hw_data *ha = vha->hw;
+       int i, core;
+       uint32_t cnt;
+
+       /* Set all 4 cores in reset */
+       for (i = 0; i < 4; i++) {
+               QLAFX00_SET_HBA_SOC_REG(ha,
+                   (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
+       }
+
+       /* Set all 4 core Clock gating control */
+       for (i = 0; i < 4; i++) {
+               QLAFX00_SET_HBA_SOC_REG(ha,
+                   (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
+       }
+
+       /* Reset all units in Fabric */
+       QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
+
+       /* Reset all interrupt control registers */
+       for (i = 0; i < 115; i++) {
+               QLAFX00_SET_HBA_SOC_REG(ha,
+                   (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
+       }
+
+       /* Reset all per-core timer control registers */
+       for (core = 0; core < 4; core++)
+               for (i = 0; i < 8; i++)
+                       QLAFX00_SET_HBA_SOC_REG(ha,
+                           (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
+
+       /* Reset per core IRQ ack register */
+       for (core = 0; core < 4; core++)
+               QLAFX00_SET_HBA_SOC_REG(ha,
+                   (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
+
+       /* Set Fabric control and config to defaults */
+       QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
+       QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Kick in Fabric units */
+       QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
+
+       /* Kick in Core0 to start boot process */
+       QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
+
+       /* Wait 10secs for soft-reset to complete. */
+       for (cnt = 10; cnt; cnt--) {
+               msleep(1000);
+               barrier();
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/**
+ * qlafx00_soft_reset() - Soft Reset ISPFx00.
+ * @ha: HA context
+ */
+void
+qlafx00_soft_reset(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (unlikely(pci_channel_offline(ha->pdev) &&
+           ha->flags.pci_channel_io_perm_failure))
+               return;
+
+       ha->isp_ops->disable_intrs(ha);
+       qlafx00_soc_cpu_reset(vha);
+       ha->isp_ops->enable_intrs(ha);
+}
+
+/**
+ * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_chip_diag(scsi_qla_host_t *vha)
+{
+       int rval = 0;
+       struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = ha->req_q_map[0];
+
+       ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+       rval = qlafx00_mbx_reg_test(vha);
+       if (rval) {
+               ql_log(ql_log_warn, vha, 0x1165,
+                   "Failed mailbox send register test\n");
+       } else {
+               /* Flag a successful rval */
+               rval = QLA_SUCCESS;
+       }
+       return rval;
+}
+
+void
+qlafx00_config_rings(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+       struct init_cb_fx *icb;
+       struct req_que *req = ha->req_q_map[0];
+       struct rsp_que *rsp = ha->rsp_q_map[0];
+
+       /* Setup ring parameters in initialization control block. */
+       icb = (struct init_cb_fx *)ha->init_cb;
+       icb->request_q_outpointer = __constant_cpu_to_le16(0);
+       icb->response_q_inpointer = __constant_cpu_to_le16(0);
+       icb->request_q_length = cpu_to_le16(req->length);
+       icb->response_q_length = cpu_to_le16(rsp->length);
+       icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+       icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+       icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+       icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+       WRT_REG_DWORD(&reg->req_q_in, 0);
+       WRT_REG_DWORD(&reg->req_q_out, 0);
+
+       WRT_REG_DWORD(&reg->rsp_q_in, 0);
+       WRT_REG_DWORD(&reg->rsp_q_out, 0);
+
+       /* PCI posting */
+       RD_REG_DWORD(&reg->rsp_q_out);
+}
+
+char *
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+       struct qla_hw_data *ha = vha->hw;
+       int pcie_reg;
+
+       pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+       if (pcie_reg) {
+               strcpy(str, "PCIe iSA");
+               return str;
+       }
+       return str;
+}
+
+char *
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       sprintf(str, "%s", ha->mr.fw_version);
+       return str;
+}
+
+void
+qlafx00_enable_intrs(struct qla_hw_data *ha)
+{
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       ha->interrupts_on = 1;
+       QLAFX00_ENABLE_ICNTRL_REG(ha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlafx00_disable_intrs(struct qla_hw_data *ha)
+{
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       ha->interrupts_on = 0;
+       QLAFX00_DISABLE_ICNTRL_REG(ha);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void
+qlafx00_tmf_iocb_timeout(void *data)
+{
+       srb_t *sp = (srb_t *)data;
+       struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+       tmf->u.tmf.comp_status = CS_TIMEOUT;
+       complete(&tmf->u.tmf.comp);
+}
+
+static void
+qlafx00_tmf_sp_done(void *data, void *ptr, int res)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+       complete(&tmf->u.tmf.comp);
+}
+
+static int
+qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
+                    uint32_t lun, uint32_t tag)
+{
+       scsi_qla_host_t *vha = fcport->vha;
+       struct srb_iocb *tm_iocb;
+       srb_t *sp;
+       int rval = QLA_FUNCTION_FAILED;
+
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp)
+               goto done;
+
+       tm_iocb = &sp->u.iocb_cmd;
+       sp->type = SRB_TM_CMD;
+       sp->name = "tmf";
+       qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+       tm_iocb->u.tmf.flags = flags;
+       tm_iocb->u.tmf.lun = lun;
+       tm_iocb->u.tmf.data = tag;
+       sp->done = qlafx00_tmf_sp_done;
+       tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
+       init_completion(&tm_iocb->u.tmf.comp);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
+
+       ql_dbg(ql_dbg_async, vha, 0x507b,
+           "Task management command issued target_id=%x\n",
+           fcport->tgt_id);
+
+       wait_for_completion(&tm_iocb->u.tmf.comp);
+
+       rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+           QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+       sp->free(vha, sp);
+done:
+       return rval;
+}
+
+int
+qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
+{
+       return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+}
+
+int
+qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
+{
+       return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+}
+
+int
+qlafx00_iospace_config(struct qla_hw_data *ha)
+{
+       if (pci_request_selected_regions(ha->pdev, ha->bars,
+           QLA2XXX_DRIVER_NAME)) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
+                   "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       /* Use MMIO operations for all accesses. */
+       if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+               ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
+                   "Invalid pci I/O region size (%s).\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+       if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
+               ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
+                   "Invalid PCI mem BAR0 region size (%s), aborting\n",
+                       pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       ha->cregbase =
+           ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+       if (!ha->cregbase) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
+                   "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
+               ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
+                   "region #2 not an MMIO resource (%s), aborting\n",
+                   pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+       if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
+               ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
+                   "Invalid PCI mem BAR2 region size (%s), aborting\n",
+                       pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       ha->iobase =
+           ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+       if (!ha->iobase) {
+               ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
+                   "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+               goto iospace_error_exit;
+       }
+
+       /* Determine queue resources */
+       ha->max_req_queues = ha->max_rsp_queues = 1;
+
+       ql_log_pci(ql_log_info, ha->pdev, 0x012c,
+           "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
+           ha->bars, ha->cregbase, ha->iobase);
+
+       return 0;
+
+iospace_error_exit:
+       return -ENOMEM;
+}
+
+static void
+qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = ha->req_q_map[0];
+       struct rsp_que *rsp = ha->rsp_q_map[0];
+
+       req->length_fx00 = req->length;
+       req->ring_fx00 = req->ring;
+       req->dma_fx00 = req->dma;
+
+       rsp->length_fx00 = rsp->length;
+       rsp->ring_fx00 = rsp->ring;
+       rsp->dma_fx00 = rsp->dma;
+
+       ql_dbg(ql_dbg_init, vha, 0x012d,
+           "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
+           "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
+           req->length_fx00, (u64)req->dma_fx00);
+
+       ql_dbg(ql_dbg_init, vha, 0x012e,
+           "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
+           "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
+           rsp->length_fx00, (u64)rsp->dma_fx00);
+}
+
+static int
+qlafx00_config_queues(struct scsi_qla_host *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = ha->req_q_map[0];
+       struct rsp_que *rsp = ha->rsp_q_map[0];
+       dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
+
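+       /*
+        * On ISPFx00 the request and response rings are not allocated from
+        * host memory; they are mapped from BAR2 MMIO space at the offsets
+        * and lengths reported by the firmware.
+        */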
+       req->length = ha->req_que_len;
+       req->ring = (void *)ha->iobase + ha->req_que_off;
+       req->dma = bar2_hdl + ha->req_que_off;
+       if ((!req->ring) || (req->length == 0)) {
+               ql_log_pci(ql_log_info, ha->pdev, 0x012f,
+                   "Invalid request ring configuration\n");
+               return QLA_FUNCTION_FAILED;
+       }
+
+       ql_dbg(ql_dbg_init, vha, 0x0130,
+           "req: %p req_ring pointer %p req len 0x%x "
+           "req off 0x%x, req->dma: 0x%llx\n",
+           req, req->ring, req->length,
+           ha->req_que_off, (u64)req->dma);
+
+       rsp->length = ha->rsp_que_len;
+       rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+       rsp->dma = bar2_hdl + ha->rsp_que_off;
+       if ((!rsp->ring) || (rsp->length == 0)) {
+               ql_log_pci(ql_log_info, ha->pdev, 0x0131,
+                   "Invalid response ring configuration\n");
+               return QLA_FUNCTION_FAILED;
+       }
+
+       ql_dbg(ql_dbg_init, vha, 0x0132,
+           "rsp: %p rsp_ring pointer %p rsp len 0x%x "
+           "rsp off 0x%x, rsp->dma: 0x%llx\n",
+           rsp, rsp->ring, rsp->length,
+           ha->rsp_que_off, (u64)rsp->dma);
+
+       return QLA_SUCCESS;
+}
+
+static int
+qlafx00_init_fw_ready(scsi_qla_host_t *vha)
+{
+       int rval = 0;
+       unsigned long wtime;
+       uint16_t wait_time;     /* Wait time */
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+       uint32_t aenmbx, aenmbx7 = 0;
+       uint32_t state[5];
+       bool done = false;
+
+       /* 30 seconds wait - Adjust if required */
+       wait_time = 30;
+
+       /* wait time before firmware ready */
+       wtime = jiffies + (wait_time * HZ);
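+       /*
+        * Poll the AEN mailbox until the firmware reports restart complete:
+        * fatal AENs trigger a SOC soft reset, and unexpected values fall
+        * back to the shadow registers plus a Get-FW-State/driver-shutdown
+        * sequence.
+        */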
+       do {
+               aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+               barrier();
+               ql_dbg(ql_dbg_mbx, vha, 0x0133,
+                   "aenmbx: 0x%x\n", aenmbx);
+
+               switch (aenmbx) {
+               case MBA_FW_NOT_STARTED:
+               case MBA_FW_STARTING:
+                       break;
+
+               case MBA_SYSTEM_ERR:
+               case MBA_REQ_TRANSFER_ERR:
+               case MBA_RSP_TRANSFER_ERR:
+               case MBA_FW_INIT_FAILURE:
+                       qlafx00_soft_reset(vha);
+                       break;
+
+               case MBA_FW_RESTART_CMPLT:
+                       /* Set the mbx and rqstq intr code */
+                       aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+                       ha->mbx_intr_code = MSW(aenmbx7);
+                       ha->rqstq_intr_code = LSW(aenmbx7);
+                       ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+                       ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+                       ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+                       ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+                       WRT_REG_DWORD(&reg->aenmailbox0, 0);
+                       RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+                       ql_dbg(ql_dbg_init, vha, 0x0134,
+                           "f/w returned mbx_intr_code: 0x%x, "
+                           "rqstq_intr_code: 0x%x\n",
+                           ha->mbx_intr_code, ha->rqstq_intr_code);
+                       QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+                       rval = QLA_SUCCESS;
+                       done = true;
+                       break;
+
+               default:
+                       /* The fw is apparently not ready. In order to continue,
+                        * we might need to issue Mbox cmd, but the problem is
+                        * that the DoorBell vector values that come with the
+                        * 8060 AEN are most likely gone by now (and thus no
+                        * bell would be rung on the fw side when mbox cmd is
+                        * issued). We therefore have to grab the 8060 AEN
+                        * shadow regs (filled in by FW when the last 8060
+                        * AEN was being posted).
+                        * Do the following to determine what is needed in
+                        * order to get the FW ready:
+                        * 1. reload the 8060 AEN values from the shadow regs
+                        * 2. clear int status to get rid of possible pending
+                        *    interrupts
+                        * 3. issue Get FW State Mbox cmd to determine fw state
+                        * Set the mbx and rqstq intr code from Shadow Regs
+                        */
+                       aenmbx7 = RD_REG_DWORD(&reg->initval7);
+                       ha->mbx_intr_code = MSW(aenmbx7);
+                       ha->rqstq_intr_code = LSW(aenmbx7);
+                       ha->req_que_off = RD_REG_DWORD(&reg->initval1);
+                       ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
+                       ha->req_que_len = RD_REG_DWORD(&reg->initval5);
+                       ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+                       ql_dbg(ql_dbg_init, vha, 0x0135,
+                           "f/w returned mbx_intr_code: 0x%x, "
+                           "rqstq_intr_code: 0x%x\n",
+                           ha->mbx_intr_code, ha->rqstq_intr_code);
+                       QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+                       /* Get the FW state */
+                       rval = qlafx00_get_firmware_state(vha, state);
+                       if (rval != QLA_SUCCESS) {
+                               /* Retry if timer has not expired */
+                               break;
+                       }
+
+                       if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
+                               /* Firmware is waiting to be
+                                * initialized by driver
+                                */
+                               rval = QLA_SUCCESS;
+                               done = true;
+                               break;
+                       }
+
+                       /* Issue driver shutdown and wait until f/w recovers.
+                        * Driver should continue to poll until 8060 AEN is
+                        * received indicating firmware recovery.
+                        */
+                       ql_dbg(ql_dbg_init, vha, 0x0136,
+                           "Sending Driver shutdown fw_state 0x%x\n",
+                           state[0]);
+
+                       rval = qlafx00_driver_shutdown(vha, 10);
+                       if (rval != QLA_SUCCESS) {
+                               rval = QLA_FUNCTION_FAILED;
+                               break;
+                       }
+                       msleep(500);
+
+                       wtime = jiffies + (wait_time * HZ);
+                       break;
+               }
+
+               if (!done) {
+                       if (time_after_eq(jiffies, wtime)) {
+                               ql_dbg(ql_dbg_init, vha, 0x0137,
+                                   "Init f/w failed: aen[7]: 0x%x\n",
+                                   RD_REG_DWORD(&reg->aenmailbox7));
+                               rval = QLA_FUNCTION_FAILED;
+                               done = true;
+                               break;
+                       }
+                       /* Delay for a while */
+                       msleep(500);
+               }
+       } while (!done);
+
+       if (rval)
+               ql_dbg(ql_dbg_init, vha, 0x0138,
+                   "%s **** FAILED ****.\n", __func__);
+       else
+               ql_dbg(ql_dbg_init, vha, 0x0139,
+                   "%s **** SUCCESS ****.\n", __func__);
+
+       return rval;
+}
+
+/*
+ * qlafx00_fw_ready() - Waits for firmware ready.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_fw_ready(scsi_qla_host_t *vha)
+{
+       int             rval;
+       unsigned long   wtime;
+       uint16_t        wait_time;      /* Wait time if loop is coming ready */
+       uint32_t        state[5];
+
+       rval = QLA_SUCCESS;
+
+       wait_time = 10;
+
+       /* wait time before firmware ready */
+       wtime = jiffies + (wait_time * HZ);
+
+       /* Wait for ISP to finish init */
+       if (!vha->flags.init_done)
+               ql_dbg(ql_dbg_init, vha, 0x013a,
+                   "Waiting for init to complete...\n");
+
+       do {
+               rval = qlafx00_get_firmware_state(vha, state);
+
+               if (rval == QLA_SUCCESS) {
+                       if (state[0] == FSTATE_FX00_INITIALIZED) {
+                               ql_dbg(ql_dbg_init, vha, 0x013b,
+                                   "fw_state=%x\n", state[0]);
+                               rval = QLA_SUCCESS;
+                               break;
+                       }
+               }
+               rval = QLA_FUNCTION_FAILED;
+
+               if (time_after_eq(jiffies, wtime))
+                       break;
+
+               /* Delay for a while */
+               msleep(500);
+
+               ql_dbg(ql_dbg_init, vha, 0x013c,
+                   "fw_state=%x curr time=%lx.\n", state[0], jiffies);
+       } while (1);
+
+       if (rval)
+               ql_dbg(ql_dbg_init, vha, 0x013d,
+                   "Firmware ready **** FAILED ****.\n");
+       else
+               ql_dbg(ql_dbg_init, vha, 0x013e,
+                   "Firmware ready **** SUCCESS ****.\n");
+
+       return rval;
+}
+
+static int
+qlafx00_find_all_targets(scsi_qla_host_t *vha,
+       struct list_head *new_fcports)
+{
+       int             rval;
+       uint16_t        tgt_id;
+       fc_port_t       *fcport, *new_fcport;
+       int             found;
+       struct qla_hw_data *ha = vha->hw;
+
+       rval = QLA_SUCCESS;
+
+       if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+               return QLA_FUNCTION_FAILED;
+
+       if ((atomic_read(&vha->loop_down_timer) ||
+            STATE_TRANSITION(vha))) {
+               atomic_set(&vha->loop_down_timer, 0);
+               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+               return QLA_FUNCTION_FAILED;
+       }
+
+       ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
+           "Listing Target bit map...\n");
+       ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
+           0x2089, (uint8_t *)ha->gid_list, 32);
+
+       /* Allocate temporary rmtport for any new rmtports discovered. */
+       new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+       if (new_fcport == NULL)
+               return QLA_MEMORY_ALLOC_FAILED;
+
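+       /*
+        * Walk the target-node bitmap returned by the firmware (one bit per
+        * tgt_id) and fetch node info for every bit that is set.
+        */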
+       for_each_set_bit(tgt_id, (void *)ha->gid_list,
+           QLAFX00_TGT_NODE_LIST_SIZE) {
+
+               /* Send get target node info */
+               new_fcport->tgt_id = tgt_id;
+               rval = qlafx00_fx_disc(vha, new_fcport,
+                   FXDISC_GET_TGT_NODE_INFO);
+               if (rval != QLA_SUCCESS) {
+                       ql_log(ql_log_warn, vha, 0x208a,
+                           "Target info scan failed -- assuming zero-entry "
+                           "result...\n");
+                       continue;
+               }
+
+               /* Locate matching device in database. */
+               found = 0;
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       if (memcmp(new_fcport->port_name,
+                           fcport->port_name, WWN_SIZE))
+                               continue;
+
+                       found++;
+
+                       /*
+                        * If tgt_id is same and state FCS_ONLINE, nothing
+                        * changed.
+                        */
+                       if (fcport->tgt_id == new_fcport->tgt_id &&
+                           atomic_read(&fcport->state) == FCS_ONLINE)
+                               break;
+
+                       /*
+                        * Tgt ID changed or device was marked to be updated.
+                        */
+                       ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
+                           "TGT-ID Change(%s): Present tgt id: "
+                           "0x%x state: 0x%x "
+                           "wwnn = %llx wwpn = %llx.\n",
+                           __func__, fcport->tgt_id,
+                           atomic_read(&fcport->state),
+                           (unsigned long long)wwn_to_u64(fcport->node_name),
+                           (unsigned long long)wwn_to_u64(fcport->port_name));
+
+                       ql_log(ql_log_info, vha, 0x208c,
+                           "TGT-ID Announce(%s): Discovered tgt "
+                           "id 0x%x wwnn = %llx "
+                           "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
+                           (unsigned long long)
+                           wwn_to_u64(new_fcport->node_name),
+                           (unsigned long long)
+                           wwn_to_u64(new_fcport->port_name));
+
+                       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+                               fcport->old_tgt_id = fcport->tgt_id;
+                               fcport->tgt_id = new_fcport->tgt_id;
+                               ql_log(ql_log_info, vha, 0x208d,
+                                  "TGT-ID: New fcport Added: %p\n", fcport);
+                               qla2x00_update_fcport(vha, fcport);
+                       } else {
+                               ql_log(ql_log_info, vha, 0x208e,
+                                   "Existing TGT-ID %x did not get "
+                                   "offline event from firmware.\n",
+                                   fcport->old_tgt_id);
+                               qla2x00_mark_device_lost(vha, fcport, 0, 0);
+                               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                               kfree(new_fcport);
+                               return rval;
+                       }
+                       break;
+               }
+
+               if (found)
+                       continue;
+
+               /* If device was not in our fcports list, then add it. */
+               list_add_tail(&new_fcport->list, new_fcports);
+
+               /* Allocate a new replacement fcport. */
+               new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+               if (new_fcport == NULL)
+                       return QLA_MEMORY_ALLOC_FAILED;
+       }
+
+       kfree(new_fcport);
+       return rval;
+}
+
+/*
+ * qlafx00_configure_all_targets
+ *      Set up target devices with node IDs.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      BIT_0 = error
+ */
+static int
+qlafx00_configure_all_targets(scsi_qla_host_t *vha)
+{
+       int rval;
+       fc_port_t *fcport, *rmptemp;
+       LIST_HEAD(new_fcports);
+
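+       /*
+        * Fetch the current target node list from the firmware, then
+        * reconcile it against the driver's fcport list.
+        */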
+       rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+           FXDISC_GET_TGT_NODE_LIST);
+       if (rval != QLA_SUCCESS) {
+               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+               return rval;
+       }
+
+       rval = qlafx00_find_all_targets(vha, &new_fcports);
+       if (rval != QLA_SUCCESS) {
+               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+               return rval;
+       }
+
+       /*
+        * Delete all previous devices marked lost.
+        */
+       list_for_each_entry(fcport, &vha->vp_fcports, list) {
+               if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+                       break;
+
+               if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+                       if (fcport->port_type != FCT_INITIATOR)
+                               qla2x00_mark_device_lost(vha, fcport, 0, 0);
+               }
+       }
+
+       /*
+        * Add the new devices to our devices list.
+        */
+       list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+               if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+                       break;
+
+               qla2x00_update_fcport(vha, fcport);
+               list_move_tail(&fcport->list, &vha->vp_fcports);
+               ql_log(ql_log_info, vha, 0x208f,
+                   "Attach new target id 0x%x wwnn = %llx "
+                   "wwpn = %llx.\n",
+                   fcport->tgt_id,
+                   (unsigned long long)wwn_to_u64(fcport->node_name),
+                   (unsigned long long)wwn_to_u64(fcport->port_name));
+       }
+
+       /* Free all new device structures not processed. */
+       list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+               list_del(&fcport->list);
+               kfree(fcport);
+       }
+
+       return rval;
+}
+
+/*
+ * qlafx00_configure_devices
+ *      Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ *      ha                = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      1 = error.
+ *      2 = database was full and device was not configured.
+ */
+int
+qlafx00_configure_devices(scsi_qla_host_t *vha)
+{
+       int rval;
+       unsigned long flags, save_flags;
+
+       rval = QLA_SUCCESS;
+
+       save_flags = flags = vha->dpc_flags;
+
+       ql_dbg(ql_dbg_disc, vha, 0x2090,
+           "Configure devices -- dpc flags =0x%lx\n", flags);
+
+       rval = qlafx00_configure_all_targets(vha);
+
+       if (rval == QLA_SUCCESS) {
+               if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+                       rval = QLA_FUNCTION_FAILED;
+               } else {
+                       atomic_set(&vha->loop_state, LOOP_READY);
+                       ql_log(ql_log_info, vha, 0x2091,
+                           "Device Ready\n");
+               }
+       }
+
+       if (rval) {
+               ql_dbg(ql_dbg_disc, vha, 0x2092,
+                   "%s *** FAILED ***.\n", __func__);
+       } else {
+               ql_dbg(ql_dbg_disc, vha, 0x2093,
+                   "%s: exiting normally.\n", __func__);
+       }
+       return rval;
+}
+
+static void
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       fc_port_t *fcport;
+
+       vha->flags.online = 0;
+       ha->flags.chip_reset_done = 0;
+       ha->mr.fw_hbt_en = 0;
+       clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+       vha->qla_stats.total_isp_aborts++;
+
+       ql_log(ql_log_info, vha, 0x013f,
+           "Performing ISP error recovery - ha = %p.\n", ha);
+
+       ha->isp_ops->reset_chip(vha);
+
+       if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+               atomic_set(&vha->loop_state, LOOP_DOWN);
+               atomic_set(&vha->loop_down_timer,
+                   QLAFX00_LOOP_DOWN_TIME);
+       } else {
+               if (!atomic_read(&vha->loop_down_timer))
+                       atomic_set(&vha->loop_down_timer,
+                           QLAFX00_LOOP_DOWN_TIME);
+       }
+
+       /* Clear all async request states across all VPs. */
+       list_for_each_entry(fcport, &vha->vp_fcports, list) {
+               fcport->flags = 0;
+               if (atomic_read(&fcport->state) == FCS_ONLINE)
+                       qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+       }
+
+       if (!ha->flags.eeh_busy) {
+               /* Requeue all commands in outstanding command list. */
+               qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+       }
+
+       qla2x00_free_irqs(vha);
+       set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+
+       /* Clear the Interrupts */
+       QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+       ql_log(ql_log_info, vha, 0x0140,
+           "%s Done - ha=%p.\n", __func__, ha);
+}
+
+/**
+ * qlafx00_init_response_q_entries() - Initializes response queue entries.
+ * @rsp: response queue
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ */
+void
+qlafx00_init_response_q_entries(struct rsp_que *rsp)
+{
+       uint16_t cnt;
+       response_t *pkt;
+
+       rsp->ring_ptr = rsp->ring;
+       rsp->ring_index    = 0;
+       rsp->status_srb = NULL;
+       pkt = rsp->ring_ptr;
+       for (cnt = 0; cnt < rsp->length; cnt++) {
+               pkt->signature = RESPONSE_PROCESSED;
+               WRT_REG_DWORD(&pkt->signature, RESPONSE_PROCESSED);
+               pkt++;
+       }
+}
+
+int
+qlafx00_rescan_isp(scsi_qla_host_t *vha)
+{
+       uint32_t status = QLA_FUNCTION_FAILED;
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+       uint32_t aenmbx7;
+
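+       /*
+        * Re-request IRQs and re-read the queue offsets/lengths and interrupt
+        * codes published by the firmware in the AEN mailboxes after reset.
+        */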
+       qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
+
+       aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+       ha->mbx_intr_code = MSW(aenmbx7);
+       ha->rqstq_intr_code = LSW(aenmbx7);
+       ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+       ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+       ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+       ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+
+       ql_dbg(ql_dbg_disc, vha, 0x2094,
+           "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x, "
+           "Req que offset 0x%x Rsp que offset 0x%x\n",
+           ha->mbx_intr_code, ha->rqstq_intr_code,
+           ha->req_que_off, ha->rsp_que_off);
+
+       /* Clear the Interrupts */
+       QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+       status = qla2x00_init_rings(vha);
+       if (!status) {
+               vha->flags.online = 1;
+
+               /* if no cable then assume it's good */
+               if ((vha->device_flags & DFLG_NO_CABLE))
+                       status = 0;
+               /* Register system information */
+               if (qlafx00_fx_disc(vha,
+                   &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
+                       ql_dbg(ql_dbg_disc, vha, 0x2095,
+                           "failed to register host info\n");
+       }
+       scsi_unblock_requests(vha->host);
+       return status;
+}
+
+void
+qlafx00_timer_routine(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t fw_heart_beat;
+       uint32_t aenmbx0;
+       struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+       /* Check firmware health */
+       if (ha->mr.fw_hbt_cnt)
+               ha->mr.fw_hbt_cnt--;
+       else {
+               if ((!ha->flags.mr_reset_hdlr_active) &&
+                   (!test_bit(UNLOADING, &vha->dpc_flags)) &&
+                   (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+                   (ha->mr.fw_hbt_en)) {
+                       fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+                       if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
+                               ha->mr.old_fw_hbt_cnt = fw_heart_beat;
+                               ha->mr.fw_hbt_miss_cnt = 0;
+                       } else {
+                               ha->mr.fw_hbt_miss_cnt++;
+                               if (ha->mr.fw_hbt_miss_cnt ==
+                                   QLAFX00_HEARTBEAT_MISS_CNT) {
+                                       set_bit(ISP_ABORT_NEEDED,
+                                           &vha->dpc_flags);
+                                       qla2xxx_wake_dpc(vha);
+                                       ha->mr.fw_hbt_miss_cnt = 0;
+                               }
+                       }
+               }
+               ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+       }
+
+       if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
+               /* Reset recovery to be performed in timer routine */
+               aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+               if (ha->mr.fw_reset_timer_exp) {
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+                       ha->mr.fw_reset_timer_exp = 0;
+               } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
+                       /* Wake up DPC to rescan the targets */
+                       set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
+                       clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+                       ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+               } else if ((aenmbx0 == MBA_FW_STARTING) &&
+                   (!ha->mr.fw_hbt_en)) {
+                       ha->mr.fw_hbt_en = 1;
+               } else if (!ha->mr.fw_reset_timer_tick) {
+                       if (aenmbx0 == ha->mr.old_aenmbx0_state)
+                               ha->mr.fw_reset_timer_exp = 1;
+                       ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+               } else if (aenmbx0 == 0xFFFFFFFF) {
+                       uint32_t data0, data1;
+
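+                       /*
+                        * AEN mailbox reads back as all ones; re-program the
+                        * PEX0 window 0 base address register using the
+                        * current BAR1 base address value.
+                        */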
+                       data0 = QLAFX00_RD_REG(ha,
+                           QLAFX00_BAR1_BASE_ADDR_REG);
+                       data1 = QLAFX00_RD_REG(ha,
+                           QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
+
+                       data0 &= 0xffff0000;
+                       data1 &= 0x0000ffff;
+
+                       QLAFX00_WR_REG(ha,
+                           QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
+                           (data0 | data1));
+               } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
+                       ha->mr.fw_reset_timer_tick =
+                           QLAFX00_MAX_RESET_INTERVAL;
+               }
+               ha->mr.old_aenmbx0_state = aenmbx0;
+               ha->mr.fw_reset_timer_tick--;
+       }
+}
+
+/*
+ *  qlafx00_reset_initialize
+ *      Re-initialize after an iSA device reset.
+ *
+ * Input:
+ *      ha  = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_reset_initialize(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (vha->device_flags & DFLG_DEV_FAILED) {
+               ql_dbg(ql_dbg_init, vha, 0x0142,
+                   "Device in failed state\n");
+               return QLA_SUCCESS;
+       }
+
+       ha->flags.mr_reset_hdlr_active = 1;
+
+       if (vha->flags.online) {
+               scsi_block_requests(vha->host);
+               qlafx00_abort_isp_cleanup(vha);
+       }
+
+       ql_log(ql_log_info, vha, 0x0143,
+           "(%s): succeeded.\n", __func__);
+       ha->flags.mr_reset_hdlr_active = 0;
+       return QLA_SUCCESS;
+}
+
+/*
+ *  qlafx00_abort_isp
+ *      Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ *      ha  = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_abort_isp(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (vha->flags.online) {
+               if (unlikely(pci_channel_offline(ha->pdev) &&
+                   ha->flags.pci_channel_io_perm_failure)) {
+                       clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+                       return QLA_SUCCESS;
+               }
+
+               scsi_block_requests(vha->host);
+               qlafx00_abort_isp_cleanup(vha);
+       }
+
+       ql_log(ql_log_info, vha, 0x0145,
+           "(%s): succeeded.\n", __func__);
+
+       return QLA_SUCCESS;
+}
+
+static inline fc_port_t *
+qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
+{
+       fc_port_t       *fcport;
+
+       /* Check for matching device in remote port list. */
+       fcport = NULL;
+       list_for_each_entry(fcport, &vha->vp_fcports, list) {
+               if (fcport->tgt_id == tgt_id) {
+                       ql_dbg(ql_dbg_async, vha, 0x5072,
+                           "Matching fcport(%p) found with TGT-ID: 0x%x "
+                           "and Remote TGT_ID: 0x%x\n",
+                           fcport, fcport->tgt_id, tgt_id);
+                       break;
+               }
+       }
+       return fcport;
+}
+
+static void
+qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
+{
+       fc_port_t       *fcport;
+
+       ql_log(ql_log_info, vha, 0x5073,
+           "Detach TGT-ID: 0x%x\n", tgt_id);
+
+       fcport = qlafx00_get_fcport(vha, tgt_id);
+       if (!fcport)
+               return;
+
+       qla2x00_mark_device_lost(vha, fcport, 0, 0);
+
+       return;
+}
+
+int
+qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
+{
+       int rval = 0;
+       uint32_t aen_code, aen_data;
+
+       aen_code = FCH_EVT_VENDOR_UNIQUE;
+       aen_data = evt->u.aenfx.evtcode;
+
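+       /*
+        * Translate ISPFx00 AENs into FC transport events; port-update AENs
+        * additionally drive target attach/detach and loop state changes.
+        */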
+       switch (evt->u.aenfx.evtcode) {
+       case QLAFX00_MBA_PORT_UPDATE:           /* Port database update */
+               if (evt->u.aenfx.mbx[1] == 0) {
+                       if (evt->u.aenfx.mbx[2] == 1) {
+                               if (!vha->flags.fw_tgt_reported)
+                                       vha->flags.fw_tgt_reported = 1;
+                               atomic_set(&vha->loop_down_timer, 0);
+                               atomic_set(&vha->loop_state, LOOP_UP);
+                               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
+                       } else if (evt->u.aenfx.mbx[2] == 2) {
+                               qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
+                       }
+               } else if (evt->u.aenfx.mbx[1] == 0xffff) {
+                       if (evt->u.aenfx.mbx[2] == 1) {
+                               if (!vha->flags.fw_tgt_reported)
+                                       vha->flags.fw_tgt_reported = 1;
+                               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                       } else if (evt->u.aenfx.mbx[2] == 2) {
+                               vha->device_flags |= DFLG_NO_CABLE;
+                               qla2x00_mark_all_devices_lost(vha, 1);
+                       }
+               }
+               break;
+       case QLAFX00_MBA_LINK_UP:
+               aen_code = FCH_EVT_LINKUP;
+               aen_data = 0;
+               break;
+       case QLAFX00_MBA_LINK_DOWN:
+               aen_code = FCH_EVT_LINKDOWN;
+               aen_data = 0;
+               break;
+       }
+
+       fc_host_post_event(vha->host, fc_get_event_number(),
+           aen_code, aen_data);
+
+       return rval;
+}
+
+static void
+qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
+{
+       u64 port_name = 0, node_name = 0;
+
+       port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
+       node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
+
+       fc_host_node_name(vha->host) = node_name;
+       fc_host_port_name(vha->host) = port_name;
+       if (!pinfo->port_type)
+               vha->hw->current_topology = ISP_CFG_F;
+       if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
+               atomic_set(&vha->loop_state, LOOP_READY);
+       else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
+               atomic_set(&vha->loop_state, LOOP_DOWN);
+       vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
+}
+
+static void
+qla2x00_fxdisc_iocb_timeout(void *data)
+{
+       srb_t *sp = (srb_t *)data;
+       struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+       complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+static void
+qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+       complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+int
+qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t fx_type)
+{
+       srb_t *sp;
+       struct srb_iocb *fdisc;
+       int rval = QLA_FUNCTION_FAILED;
+       struct qla_hw_data *ha = vha->hw;
+       struct host_system_info *phost_info;
+       struct register_host_info *preg_hsi;
+       struct new_utsname *p_sysid = NULL;
+       struct timeval tv;
+
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp)
+               goto done;
+
+       fdisc = &sp->u.iocb_cmd;
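+       /*
+        * Each FXDISC type selects the request/response DMA buffer sizes and,
+        * where needed, the request payload for the discovery IOCB.
+        */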
+       switch (fx_type) {
+       case FXDISC_GET_CONFIG_INFO:
+               fdisc->u.fxiocb.flags =
+                   SRB_FXDISC_RESP_DMA_VALID;
+               fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
+               break;
+       case FXDISC_GET_PORT_INFO:
+               fdisc->u.fxiocb.flags =
+                   SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+               fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
+               fdisc->u.fxiocb.req_data = fcport->port_id;
+               break;
+       case FXDISC_GET_TGT_NODE_INFO:
+               fdisc->u.fxiocb.flags =
+                   SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+               fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
+               fdisc->u.fxiocb.req_data = fcport->tgt_id;
+               break;
+       case FXDISC_GET_TGT_NODE_LIST:
+               fdisc->u.fxiocb.flags =
+                   SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+               fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
+               break;
+       case FXDISC_REG_HOST_INFO:
+               fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
+               fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
+               p_sysid = utsname();
+               if (!p_sysid) {
+                       ql_log(ql_log_warn, vha, 0x303c,
+                           "Not able to get the system information\n");
+                       goto done_free_sp;
+               }
+               break;
+       default:
+               break;
+       }
+
+       if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+               fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
+                   fdisc->u.fxiocb.req_len,
+                   &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
+               if (!fdisc->u.fxiocb.req_addr)
+                       goto done_free_sp;
+
+               if (fx_type == FXDISC_REG_HOST_INFO) {
+                       preg_hsi = (struct register_host_info *)
+                               fdisc->u.fxiocb.req_addr;
+                       phost_info = &preg_hsi->hsi;
+                       memset(preg_hsi, 0, sizeof(struct register_host_info));
+                       phost_info->os_type = OS_TYPE_LINUX;
+                       strncpy(phost_info->sysname,
+                           p_sysid->sysname, SYSNAME_LENGTH);
+                       strncpy(phost_info->nodename,
+                           p_sysid->nodename, NODENAME_LENGTH);
+                       strncpy(phost_info->release,
+                           p_sysid->release, RELEASE_LENGTH);
+                       strncpy(phost_info->version,
+                           p_sysid->version, VERSION_LENGTH);
+                       strncpy(phost_info->machine,
+                           p_sysid->machine, MACHINE_LENGTH);
+                       strncpy(phost_info->domainname,
+                           p_sysid->domainname, DOMNAME_LENGTH);
+                       strncpy(phost_info->hostdriver,
+                           QLA2XXX_VERSION, VERSION_LENGTH);
+                       do_gettimeofday(&tv);
+                       preg_hsi->utc = (uint64_t)tv.tv_sec;
+                       ql_dbg(ql_dbg_init, vha, 0x0149,
+                           "ISP%04X: Host registration with firmware\n",
+                           ha->pdev->device);
+                       ql_dbg(ql_dbg_init, vha, 0x014a,
+                           "os_type = '%d', sysname = '%s', nodename = '%s'\n",
+                           phost_info->os_type,
+                           phost_info->sysname,
+                           phost_info->nodename);
+                       ql_dbg(ql_dbg_init, vha, 0x014b,
+                           "release = '%s', version = '%s'\n",
+                           phost_info->release,
+                           phost_info->version);
+                       ql_dbg(ql_dbg_init, vha, 0x014c,
+                           "machine = '%s' "
+                           "domainname = '%s', hostdriver = '%s'\n",
+                           phost_info->machine,
+                           phost_info->domainname,
+                           phost_info->hostdriver);
+                       ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
+                           (uint8_t *)phost_info,
+                           sizeof(struct host_system_info));
+               }
+       }
+
+       if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+               fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
+                   fdisc->u.fxiocb.rsp_len,
+                   &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
+               if (!fdisc->u.fxiocb.rsp_addr)
+                       goto done_unmap_req;
+       }
+
+       sp->type = SRB_FXIOCB_DCMD;
+       sp->name = "fxdisc";
+       qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+       fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+       fdisc->u.fxiocb.req_func_type = fx_type;
+       sp->done = qla2x00_fxdisc_sp_done;
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_unmap_dma;
+
+       wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
+
+       if (fx_type == FXDISC_GET_CONFIG_INFO) {
+               struct config_info_data *pinfo =
+                   (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
+               memcpy(&vha->hw->mr.product_name, pinfo->product_name,
+                   sizeof(vha->hw->mr.product_name));
+               memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
+                   sizeof(vha->hw->mr.symbolic_name));
+               memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
+                   sizeof(vha->hw->mr.serial_num));
+               memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
+                   sizeof(vha->hw->mr.hw_version));
+               memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
+                   sizeof(vha->hw->mr.fw_version));
+               strim(vha->hw->mr.fw_version);
+               memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
+                   sizeof(vha->hw->mr.uboot_version));
+               memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
+                   sizeof(vha->hw->mr.fru_serial_num));
+       } else if (fx_type == FXDISC_GET_PORT_INFO) {
+               struct port_info_data *pinfo =
+                   (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
+               memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
+               memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
+               vha->d_id.b.domain = pinfo->port_id[0];
+               vha->d_id.b.area = pinfo->port_id[1];
+               vha->d_id.b.al_pa = pinfo->port_id[2];
+               qlafx00_update_host_attr(vha, pinfo);
+               ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
+                   (uint8_t *)pinfo, 16);
+       } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
+               struct qlafx00_tgt_node_info *pinfo =
+                   (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+               memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
+               memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
+               fcport->port_type = FCT_TARGET;
+               ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
+                   (uint8_t *)pinfo, 16);
+       } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
+               struct qlafx00_tgt_node_info *pinfo =
+                   (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+               ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
+                   (uint8_t *)pinfo, 16);
+               memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
+       }
+       rval = fdisc->u.fxiocb.result;
+
+done_unmap_dma:
+       if (fdisc->u.fxiocb.rsp_addr)
+               dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
+                   fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
+
+done_unmap_req:
+       if (fdisc->u.fxiocb.req_addr)
+               dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
+                   fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
+done_free_sp:
+       sp->free(vha, sp);
+done:
+       return rval;
+}
+
+static void
+qlafx00_abort_iocb_timeout(void *data)
+{
+       srb_t *sp = (srb_t *)data;
+       struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+       abt->u.abt.comp_status = CS_TIMEOUT;
+       complete(&abt->u.abt.comp);
+}
+
+static void
+qlafx00_abort_sp_done(void *data, void *ptr, int res)
+{
+       srb_t *sp = (srb_t *)ptr;
+       struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+       complete(&abt->u.abt.comp);
+}
+
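+/*
+ * qlafx00_async_abt_cmd
+ *      Queue an abort IOCB for a previously issued command and wait for
+ *      the firmware to report the abort completion status.
+ */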
+static int
+qlafx00_async_abt_cmd(srb_t *cmd_sp)
+{
+       scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+       fc_port_t *fcport = cmd_sp->fcport;
+       struct srb_iocb *abt_iocb;
+       srb_t *sp;
+       int rval = QLA_FUNCTION_FAILED;
+
+       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       if (!sp)
+               goto done;
+
+       abt_iocb = &sp->u.iocb_cmd;
+       sp->type = SRB_ABT_CMD;
+       sp->name = "abort";
+       qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+       abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+       sp->done = qlafx00_abort_sp_done;
+       abt_iocb->timeout = qlafx00_abort_iocb_timeout;
+       init_completion(&abt_iocb->u.abt.comp);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
+
+       ql_dbg(ql_dbg_async, vha, 0x507c,
+           "Abort command issued - hdl=%x, target_id=%x\n",
+           cmd_sp->handle, fcport->tgt_id);
+
+       wait_for_completion(&abt_iocb->u.abt.comp);
+
+       rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+           QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+       sp->free(vha, sp);
+done:
+       return rval;
+}
+
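+/*
+ * qlafx00_abort_command
+ *      Verify the SRB is still outstanding on the request queue and, if
+ *      so, abort it through the asynchronous abort IOCB path.
+ */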
+int
+qlafx00_abort_command(srb_t *sp)
+{
+       unsigned long   flags = 0;
+
+       uint32_t        handle;
+       fc_port_t       *fcport = sp->fcport;
+       struct scsi_qla_host *vha = fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = vha->req;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
+               if (req->outstanding_cmds[handle] == sp)
+                       break;
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
+               /* Command not found. */
+               return QLA_FUNCTION_FAILED;
+       }
+       return qlafx00_async_abt_cmd(sp);
+}
+
+/*
+ * qlafx00_initialize_adapter
+ *      Initialize board.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_initialize_adapter(scsi_qla_host_t *vha)
+{
+       int     rval;
+       struct qla_hw_data *ha = vha->hw;
+
+       /* Clear adapter flags. */
+       vha->flags.online = 0;
+       ha->flags.chip_reset_done = 0;
+       vha->flags.reset_active = 0;
+       ha->flags.pci_channel_io_perm_failure = 0;
+       ha->flags.eeh_busy = 0;
+       ha->thermal_support = 0;
+       atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+       atomic_set(&vha->loop_state, LOOP_DOWN);
+       vha->device_flags = DFLG_NO_CABLE;
+       vha->dpc_flags = 0;
+       vha->flags.management_server_logged_in = 0;
+       vha->marker_needed = 0;
+       ha->isp_abort_cnt = 0;
+       ha->beacon_blink_led = 0;
+
+       set_bit(0, ha->req_qid_map);
+       set_bit(0, ha->rsp_qid_map);
+
+       ql_dbg(ql_dbg_init, vha, 0x0147,
+           "Configuring PCI space...\n");
+
+       rval = ha->isp_ops->pci_config(vha);
+       if (rval) {
+               ql_log(ql_log_warn, vha, 0x0148,
+                   "Unable to configure PCI space.\n");
+               return rval;
+       }
+
+       rval = qlafx00_init_fw_ready(vha);
+       if (rval != QLA_SUCCESS)
+               return rval;
+
+       qlafx00_save_queue_ptrs(vha);
+
+       rval = qlafx00_config_queues(vha);
+       if (rval != QLA_SUCCESS)
+               return rval;
+
+       /*
+        * Allocate the array of outstanding commands
+        * now that we know the firmware resources.
+        */
+       rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
+       if (rval != QLA_SUCCESS)
+               return rval;
+
+       rval = qla2x00_init_rings(vha);
+       ha->flags.chip_reset_done = 1;
+
+       return rval;
+}
+
+uint32_t
+qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int rval = QLA_FUNCTION_FAILED;
+       uint32_t state[1];
+
+       if (qla2x00_reset_active(vha))
+               ql_log(ql_log_warn, vha, 0x70ce,
+                   "ISP reset active.\n");
+       else if (!vha->hw->flags.eeh_busy)
+               rval = qlafx00_get_firmware_state(vha, state);
+       if (rval != QLA_SUCCESS)
+               memset(state, -1, sizeof(state));
+
+       return state[0];
+}
+
+void
+qlafx00_get_host_speed(struct Scsi_Host *shost)
+{
+       struct qla_hw_data *ha = ((struct scsi_qla_host *)
+                                       (shost_priv(shost)))->hw;
+       u32 speed = FC_PORTSPEED_UNKNOWN;
+
+       switch (ha->link_data_rate) {
+       case QLAFX00_PORT_SPEED_2G:
+               speed = FC_PORTSPEED_2GBIT;
+               break;
+       case QLAFX00_PORT_SPEED_4G:
+               speed = FC_PORTSPEED_4GBIT;
+               break;
+       case QLAFX00_PORT_SPEED_8G:
+               speed = FC_PORTSPEED_8GBIT;
+               break;
+       case QLAFX00_PORT_SPEED_10G:
+               speed = FC_PORTSPEED_10GBIT;
+               break;
+       }
+       fc_host_speed(shost) = speed;
+}
+
+/** QLAFX00 specific ISR implementation functions */
+
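+/*
+ * Copy firmware-supplied sense data into the midlayer sense buffer and
+ * track any remainder to be delivered via status continuation entries.
+ */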
+static inline void
+qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+                    uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+       struct scsi_qla_host *vha = sp->fcport->vha;
+       struct scsi_cmnd *cp = GET_CMD_SP(sp);
+       uint32_t track_sense_len;
+
+       SET_FW_SENSE_LEN(sp, sense_len);
+
+       if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+               sense_len = SCSI_SENSE_BUFFERSIZE;
+
+       SET_CMD_SENSE_LEN(sp, sense_len);
+       SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+       track_sense_len = sense_len;
+
+       if (sense_len > par_sense_len)
+               sense_len = par_sense_len;
+
+       memcpy(cp->sense_buffer, sense_data, sense_len);
+
+       SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
+
+       SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+       track_sense_len -= sense_len;
+       SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+       ql_dbg(ql_dbg_io, vha, 0x304d,
+           "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
+           sense_len, par_sense_len, track_sense_len);
+       if (GET_FW_SENSE_LEN(sp) > 0) {
+               rsp->status_srb = sp;
+               cp->result = res;
+       }
+
+       if (sense_len) {
+               ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
+                   "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+                   sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+                   cp);
+               ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
+                   cp->sense_buffer, sense_len);
+       }
+}
+
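+/*
+ * Complete a Task Management IOCB: derive the completion status reported
+ * to the waiting SRB from the firmware and SCSI status fields.
+ */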
+static void
+qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+                     struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
+                     uint16_t sstatus, uint16_t cpstatus)
+{
+       struct srb_iocb *tmf;
+
+       tmf = &sp->u.iocb_cmd;
+       if (cpstatus != CS_COMPLETE ||
+           (sstatus & SS_RESPONSE_INFO_LEN_VALID))
+               cpstatus = CS_INCOMPLETE;
+       tmf->u.tmf.comp_status = cpstatus;
+       sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+                        struct abort_iocb_entry_fx00 *pkt)
+{
+       const char func[] = "ABT_IOCB";
+       srb_t *sp;
+       struct srb_iocb *abt;
+
+       sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+       if (!sp)
+               return;
+
+       abt = &sp->u.iocb_cmd;
+       abt->u.abt.comp_status = le32_to_cpu(pkt->tgt_id_sts);
+       sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
+                        struct ioctl_iocb_entry_fx00 *pkt)
+{
+       const char func[] = "IOSB_IOCB";
+       srb_t *sp;
+       struct fc_bsg_job *bsg_job;
+       struct srb_iocb *iocb_job;
+       int res;
+       struct qla_mt_iocb_rsp_fx00 fstatus;
+       uint8_t *fw_sts_ptr;
+
+       sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+       if (!sp)
+               return;
+
+       if (sp->type == SRB_FXIOCB_DCMD) {
+               iocb_job = &sp->u.iocb_cmd;
+               iocb_job->u.fxiocb.seq_number = le32_to_cpu(pkt->seq_no);
+               iocb_job->u.fxiocb.fw_flags = le32_to_cpu(pkt->fw_iotcl_flags);
+               iocb_job->u.fxiocb.result = le32_to_cpu(pkt->status);
+               if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
+                       iocb_job->u.fxiocb.req_data =
+                           le32_to_cpu(pkt->dataword_r);
+       } else {
+               bsg_job = sp->u.bsg_job;
+
+               memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+               fstatus.reserved_1 = pkt->reserved_0;
+               fstatus.func_type = pkt->comp_func_num;
+               fstatus.ioctl_flags = pkt->fw_iotcl_flags;
+               fstatus.ioctl_data = pkt->dataword_r;
+               fstatus.adapid = pkt->adapid;
+               fstatus.adapid_hi = pkt->adapid_hi;
+               fstatus.reserved_2 = pkt->reserved_1;
+               fstatus.res_count = pkt->residuallen;
+               fstatus.status = pkt->status;
+               fstatus.seq_number = pkt->seq_no;
+               memcpy(fstatus.reserved_3,
+                   pkt->reserved_2, 20 * sizeof(uint8_t));
+
+               fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+                   sizeof(struct fc_bsg_reply);
+
+               memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
+                   sizeof(struct qla_mt_iocb_rsp_fx00));
+               bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+                       sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
+
+               ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+                   sp->fcport->vha, 0x5080,
+                   (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+
+               ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+                   sp->fcport->vha, 0x5074,
+                   (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+               res = bsg_job->reply->result = DID_OK << 16;
+               bsg_job->reply->reply_payload_rcv_len =
+                   bsg_job->reply_payload.payload_len;
+       }
+       sp->done(vha, sp, res);
+}
+
+/**
+ * qlafx00_status_entry() - Process a Status IOCB entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+       srb_t           *sp;
+       fc_port_t       *fcport;
+       struct scsi_cmnd *cp;
+       struct sts_entry_fx00 *sts;
+       uint16_t        comp_status;
+       uint16_t        scsi_status;
+       uint16_t        ox_id;
+       uint8_t         lscsi_status;
+       int32_t         resid;
+       uint32_t        sense_len, par_sense_len, rsp_info_len, resid_len,
+           fw_resid_len;
+       uint8_t         *rsp_info = NULL, *sense_data = NULL;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t hindex, handle;
+       uint16_t que;
+       struct req_que *req;
+       int logit = 1;
+       int res = 0;
+
+       sts = (struct sts_entry_fx00 *) pkt;
+
+       comp_status = le16_to_cpu(sts->comp_status);
+       scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+       hindex = sts->handle;
+       handle = LSW(hindex);
+
+       que = MSW(hindex);
+       req = ha->req_q_map[que];
+
+       /* Validate handle. */
+       if (handle < req->num_outstanding_cmds)
+               sp = req->outstanding_cmds[handle];
+       else
+               sp = NULL;
+
+       if (sp == NULL) {
+               ql_dbg(ql_dbg_io, vha, 0x3034,
+                   "Invalid status handle (0x%x).\n", handle);
+
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               qla2xxx_wake_dpc(vha);
+               return;
+       }
+
+       if (sp->type == SRB_TM_CMD) {
+               req->outstanding_cmds[handle] = NULL;
+               qlafx00_tm_iocb_entry(vha, req, pkt, sp,
+                   scsi_status, comp_status);
+               return;
+       }
+
+       /* Fast path completion. */
+       if (comp_status == CS_COMPLETE && scsi_status == 0) {
+               qla2x00_do_host_ramp_up(vha);
+               qla2x00_process_completed_request(vha, req, handle);
+               return;
+       }
+
+       req->outstanding_cmds[handle] = NULL;
+       cp = GET_CMD_SP(sp);
+       if (cp == NULL) {
+               ql_dbg(ql_dbg_io, vha, 0x3048,
+                   "Command already returned (0x%x/%p).\n",
+                   handle, sp);
+
+               return;
+       }
+
+       lscsi_status = scsi_status & STATUS_MASK;
+
+       fcport = sp->fcport;
+
+       ox_id = 0;
+       sense_len = par_sense_len = rsp_info_len = resid_len =
+               fw_resid_len = 0;
+       if (scsi_status & SS_SENSE_LEN_VALID)
+               sense_len = le32_to_cpu(sts->sense_len);
+       if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+               resid_len = le32_to_cpu(sts->residual_len);
+       if (comp_status == CS_DATA_UNDERRUN)
+               fw_resid_len = le32_to_cpu(sts->residual_len);
+       rsp_info = sense_data = sts->data;
+       par_sense_len = sizeof(sts->data);
+
+       /* Check for overrun. */
+       if (comp_status == CS_COMPLETE &&
+           scsi_status & SS_RESIDUAL_OVER)
+               comp_status = CS_DATA_OVERRUN;
+
+       /*
+        * Based on Host and scsi status generate status code for Linux
+        */
+       switch (comp_status) {
+       case CS_COMPLETE:
+       case CS_QUEUE_FULL:
+               if (scsi_status == 0) {
+                       res = DID_OK << 16;
+                       break;
+               }
+               if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
+                       resid = resid_len;
+                       scsi_set_resid(cp, resid);
+
+                       if (!lscsi_status &&
+                           ((unsigned)(scsi_bufflen(cp) - resid) <
+                            cp->underflow)) {
+                               ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
+                                   "Mid-layer underflow "
+                                   "detected (0x%x of 0x%x bytes).\n",
+                                   resid, scsi_bufflen(cp));
+
+                               res = DID_ERROR << 16;
+                               break;
+                       }
+               }
+               res = DID_OK << 16 | lscsi_status;
+
+               if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+                       ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
+                           "QUEUE FULL detected.\n");
+                       break;
+               }
+               logit = 0;
+               if (lscsi_status != SS_CHECK_CONDITION)
+                       break;
+
+               memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+               if (!(scsi_status & SS_SENSE_LEN_VALID))
+                       break;
+
+               qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+                   rsp, res);
+               break;
+
+       case CS_DATA_UNDERRUN:
+               /* Use F/W calculated residual length. */
+               if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+                       resid = fw_resid_len;
+               else
+                       resid = resid_len;
+               scsi_set_resid(cp, resid);
+               if (scsi_status & SS_RESIDUAL_UNDER) {
+                       if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+                           && fw_resid_len != resid_len) {
+                               ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
+                                   "Dropped frame(s) detected "
+                                   "(0x%x of 0x%x bytes).\n",
+                                   resid, scsi_bufflen(cp));
+
+                               res = DID_ERROR << 16 | lscsi_status;
+                               goto check_scsi_status;
+                       }
+
+                       if (!lscsi_status &&
+                           ((unsigned)(scsi_bufflen(cp) - resid) <
+                           cp->underflow)) {
+                               ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
+                                   "Mid-layer underflow "
+                                   "detected (0x%x of 0x%x bytes, "
+                                   "cp->underflow: 0x%x).\n",
+                                   resid, scsi_bufflen(cp), cp->underflow);
+
+                               res = DID_ERROR << 16;
+                               break;
+                       }
+               } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+                           lscsi_status != SAM_STAT_BUSY) {
+                       /*
+                        * scsi status of task set and busy are considered
+                        * to be task not completed.
+                        */
+
+                       ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
+                           "Dropped frame(s) detected (0x%x "
+                           "of 0x%x bytes).\n", resid,
+                           scsi_bufflen(cp));
+
+                       res = DID_ERROR << 16 | lscsi_status;
+                       goto check_scsi_status;
+               } else {
+                       ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
+                           "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+                           scsi_status, lscsi_status);
+               }
+
+               res = DID_OK << 16 | lscsi_status;
+               logit = 0;
+
+check_scsi_status:
+               /*
+                * Check to see if SCSI Status is non zero. If so report SCSI
+                * Status.
+                */
+               if (lscsi_status != 0) {
+                       if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+                               ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
+                                   "QUEUE FULL detected.\n");
+                               logit = 1;
+                               break;
+                       }
+                       if (lscsi_status != SS_CHECK_CONDITION)
+                               break;
+
+                       memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+                       if (!(scsi_status & SS_SENSE_LEN_VALID))
+                               break;
+
+                       qlafx00_handle_sense(sp, sense_data, par_sense_len,
+                           sense_len, rsp, res);
+               }
+               break;
+
+       case CS_PORT_LOGGED_OUT:
+       case CS_PORT_CONFIG_CHG:
+       case CS_PORT_BUSY:
+       case CS_INCOMPLETE:
+       case CS_PORT_UNAVAILABLE:
+       case CS_TIMEOUT:
+       case CS_RESET:
+
+               /*
+                * We are going to have the fc class block the rport
+                * while we try to recover so instruct the mid layer
+                * to requeue until the class decides how to handle this.
+                */
+               res = DID_TRANSPORT_DISRUPTED << 16;
+
+               ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
+                   "Port down status: port-state=0x%x.\n",
+                   atomic_read(&fcport->state));
+
+               if (atomic_read(&fcport->state) == FCS_ONLINE)
+                       qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+               break;
+
+       case CS_ABORTED:
+               res = DID_RESET << 16;
+               break;
+
+       default:
+               res = DID_ERROR << 16;
+               break;
+       }
+
+       if (logit)
+               ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
+                   "FCP command status: 0x%x-0x%x (0x%x) "
+                   "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x "
+                   "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+                   "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
+                   "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
+                   comp_status, scsi_status, res, vha->host_no,
+                   cp->device->id, cp->device->lun, fcport->tgt_id,
+                   lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
+                   cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
+                   cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
+                   rsp_info_len, resid_len, fw_resid_len, sense_len,
+                   par_sense_len, rsp_info_len);
+
+       if (!res)
+               qla2x00_do_host_ramp_up(vha);
+
+       if (rsp->status_srb == NULL)
+               sp->done(ha, sp, res);
+}
+
+/**
+ * qlafx00_status_cont_entry() - Process a Status Continuations entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+       uint8_t sense_sz = 0;
+       struct qla_hw_data *ha = rsp->hw;
+       struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+       srb_t *sp = rsp->status_srb;
+       struct scsi_cmnd *cp;
+       uint32_t sense_len;
+       uint8_t *sense_ptr;
+
+       if (!sp) {
+               ql_dbg(ql_dbg_io, vha, 0x3037,
+                   "no SP, sp = %p\n", sp);
+               return;
+       }
+
+       if (!GET_FW_SENSE_LEN(sp)) {
+               ql_dbg(ql_dbg_io, vha, 0x304b,
+                   "no fw sense data, sp = %p\n", sp);
+               return;
+       }
+       cp = GET_CMD_SP(sp);
+       if (cp == NULL) {
+               ql_log(ql_log_warn, vha, 0x303b,
+                   "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+               rsp->status_srb = NULL;
+               return;
+       }
+
+       if (!GET_CMD_SENSE_LEN(sp)) {
+               ql_dbg(ql_dbg_io, vha, 0x304c,
+                   "no sense data, sp = %p\n", sp);
+       } else {
+               sense_len = GET_CMD_SENSE_LEN(sp);
+               sense_ptr = GET_CMD_SENSE_PTR(sp);
+               ql_dbg(ql_dbg_io, vha, 0x304f,
+                   "sp=%p sense_len=0x%x sense_ptr=%p.\n",
+                   sp, sense_len, sense_ptr);
+
+               if (sense_len > sizeof(pkt->data))
+                       sense_sz = sizeof(pkt->data);
+               else
+                       sense_sz = sense_len;
+
+               /* Move sense data. */
+               ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
+                   (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+               memcpy(sense_ptr, pkt->data, sense_sz);
+               ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
+                   sense_ptr, sense_sz);
+
+               sense_len -= sense_sz;
+               sense_ptr += sense_sz;
+
+               SET_CMD_SENSE_PTR(sp, sense_ptr);
+               SET_CMD_SENSE_LEN(sp, sense_len);
+       }
+       sense_len = GET_FW_SENSE_LEN(sp);
+       sense_len = (sense_len > sizeof(pkt->data)) ?
+           (sense_len - sizeof(pkt->data)) : 0;
+       SET_FW_SENSE_LEN(sp, sense_len);
+
+       /* Place command on done queue. */
+       if (sense_len == 0) {
+               rsp->status_srb = NULL;
+               sp->done(ha, sp, cp->result);
+       }
+}
+
+/**
+ * qlafx00_multistatus_entry() - Process Multi response queue entries.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+       struct rsp_que *rsp, void *pkt)
+{
+       srb_t           *sp;
+       struct multi_sts_entry_fx00 *stsmfx;
+       struct qla_hw_data *ha = vha->hw;
+       uint32_t handle, hindex, handle_count, i;
+       uint16_t que;
+       struct req_que *req;
+       uint32_t *handle_ptr;
+
+       stsmfx = (struct multi_sts_entry_fx00 *) pkt;
+
+       handle_count = stsmfx->handle_count;
+
+       if (handle_count > MAX_HANDLE_COUNT) {
+               ql_dbg(ql_dbg_io, vha, 0x3035,
+                   "Invalid handle count (0x%x).\n", handle_count);
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               qla2xxx_wake_dpc(vha);
+               return;
+       }
+
+       handle_ptr = (uint32_t *) &stsmfx->handles[0];
+
+       for (i = 0; i < handle_count; i++) {
+               hindex = le32_to_cpu(*handle_ptr);
+               handle = LSW(hindex);
+               que = MSW(hindex);
+               req = ha->req_q_map[que];
+
+               /* Validate handle. */
+               if (handle < req->num_outstanding_cmds)
+                       sp = req->outstanding_cmds[handle];
+               else
+                       sp = NULL;
+
+               if (sp == NULL) {
+                       ql_dbg(ql_dbg_io, vha, 0x3044,
+                           "Invalid status handle (0x%x).\n", handle);
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+                       return;
+               }
+               qla2x00_process_completed_request(vha, req, handle);
+               handle_ptr++;
+       }
+}
+
+/**
+ * qlafx00_error_entry() - Process an error entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
+                   struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+{
+       srb_t *sp;
+       struct qla_hw_data *ha = vha->hw;
+       const char func[] = "ERROR-IOCB";
+       uint16_t que = MSW(pkt->handle);
+       struct req_que *req = NULL;
+       int res = DID_ERROR << 16;
+
+       ql_dbg(ql_dbg_async, vha, 0x507f,
+           "type of error status in response: 0x%x\n", estatus);
+
+       req = ha->req_q_map[que];
+
+       sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+       if (sp) {
+               sp->done(ha, sp, res);
+               return;
+       }
+
+       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+       qla2xxx_wake_dpc(vha);
+}
+
+/**
+ * qlafx00_process_response_queue() - Process response queue entries.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_process_response_queue(struct scsi_qla_host *vha,
+       struct rsp_que *rsp)
+{
+       struct sts_entry_fx00 *pkt;
+       response_t *lptr;
+
+       if (!vha->flags.online)
+               return;
+
+       while (RD_REG_DWORD(&(rsp->ring_ptr->signature)) !=
+           RESPONSE_PROCESSED) {
+               lptr = rsp->ring_ptr;
+               memcpy_fromio(rsp->rsp_pkt, lptr, sizeof(rsp->rsp_pkt));
+               pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
+
+               rsp->ring_index++;
+               if (rsp->ring_index == rsp->length) {
+                       rsp->ring_index = 0;
+                       rsp->ring_ptr = rsp->ring;
+               } else {
+                       rsp->ring_ptr++;
+               }
+
+               if (pkt->entry_status != 0 &&
+                   pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+                       qlafx00_error_entry(vha, rsp,
+                           (struct sts_entry_fx00 *)pkt, pkt->entry_status,
+                           pkt->entry_type);
+                       goto next_iter;
+               }
+
+               switch (pkt->entry_type) {
+               case STATUS_TYPE_FX00:
+                       qlafx00_status_entry(vha, rsp, pkt);
+                       break;
+
+               case STATUS_CONT_TYPE_FX00:
+                       qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+                       break;
+
+               case MULTI_STATUS_TYPE_FX00:
+                       qlafx00_multistatus_entry(vha, rsp, pkt);
+                       break;
+
+               case ABORT_IOCB_TYPE_FX00:
+                       qlafx00_abort_iocb_entry(vha, rsp->req,
+                          (struct abort_iocb_entry_fx00 *)pkt);
+                       break;
+
+               case IOCTL_IOSB_TYPE_FX00:
+                       qlafx00_ioctl_iosb_entry(vha, rsp->req,
+                           (struct ioctl_iocb_entry_fx00 *)pkt);
+                       break;
+               default:
+                       /* Type Not Supported. */
+                       ql_dbg(ql_dbg_async, vha, 0x5081,
+                           "Received unknown response pkt type %x "
+                           "entry status=%x.\n",
+                           pkt->entry_type, pkt->entry_status);
+                       break;
+               }
+next_iter:
+               WRT_REG_DWORD(&lptr->signature, RESPONSE_PROCESSED);
+               wmb();
+       }
+
+       /* Adjust ring index */
+       WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+/**
+ * qlafx00_async_event() - Process asynchronous events.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_async_event(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_fx00 __iomem *reg;
+       int data_size = 1;
+
+       reg = &ha->iobase->ispfx00;
+       /* Setup to process RIO completion. */
+       switch (ha->aenmb[0]) {
+       case QLAFX00_MBA_SYSTEM_ERR:            /* System Error */
+               ql_log(ql_log_warn, vha, 0x5079,
+                   "ISP System Error - mbx0=%x\n", ha->aenmb[0]);
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               break;
+
+       case QLAFX00_MBA_SHUTDOWN_RQSTD:        /* Shutdown requested */
+               ql_dbg(ql_dbg_async, vha, 0x5076,
+                   "Asynchronous FW shutdown requested.\n");
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               qla2xxx_wake_dpc(vha);
+               break;
+
+       case QLAFX00_MBA_PORT_UPDATE:           /* Port database update */
+               ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+               ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+               ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+               ql_dbg(ql_dbg_async, vha, 0x5077,
+                   "Asynchronous port Update received "
+                   "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
+                   ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
+               data_size = 4;
+               break;
+       default:
+               ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+               ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+               ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+               ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
+               ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
+               ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
+               ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+               ql_dbg(ql_dbg_async, vha, 0x5078,
+                   "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
+                   ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
+                   ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
+               break;
+       }
+       qlafx00_post_aenfx_work(vha, ha->aenmb[0],
+           (uint32_t *)ha->aenmb, data_size);
+}
+
+/**
+ * qlafx00_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb0: Mailbox0 register value
+ */
+static void
+qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
+{
+       uint16_t        cnt;
+       uint16_t __iomem *wptr;
+       struct qla_hw_data *ha = vha->hw;
+       struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+       if (!ha->mcp32)
+               ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
+
+       /* Load return mailbox registers. */
+       ha->flags.mbox_int = 1;
+       ha->mailbox_out32[0] = mb0;
+       wptr = (uint16_t __iomem *)&reg->mailbox17;
+
+       for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+               ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
+               wptr++;
+       }
+}
+
+/**
+ * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qlafx00_intr_handler(int irq, void *dev_id)
+{
+       scsi_qla_host_t *vha;
+       struct qla_hw_data *ha;
+       struct device_reg_fx00 __iomem *reg;
+       int             status;
+       unsigned long   iter;
+       uint32_t        stat;
+       uint32_t        mb[8];
+       struct rsp_que *rsp;
+       unsigned long   flags;
+       uint32_t clr_intr = 0;
+
+       rsp = (struct rsp_que *) dev_id;
+       if (!rsp) {
+               ql_log(ql_log_info, NULL, 0x507d,
+                   "%s: NULL response queue pointer.\n", __func__);
+               return IRQ_NONE;
+       }
+
+       ha = rsp->hw;
+       reg = &ha->iobase->ispfx00;
+       status = 0;
+
+       if (unlikely(pci_channel_offline(ha->pdev)))
+               return IRQ_HANDLED;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       vha = pci_get_drvdata(ha->pdev);
+       for (iter = 50; iter--; clr_intr = 0) {
+               stat = QLAFX00_RD_INTR_REG(ha);
+               if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
+                       break;
+
+               switch (stat & QLAFX00_HST_INT_STS_BITS) {
+               case QLAFX00_INTR_MB_CMPLT:
+               case QLAFX00_INTR_MB_RSP_CMPLT:
+               case QLAFX00_INTR_MB_ASYNC_CMPLT:
+               case QLAFX00_INTR_ALL_CMPLT:
+                       mb[0] = RD_REG_WORD(&reg->mailbox16);
+                       qlafx00_mbx_completion(vha, mb[0]);
+                       status |= MBX_INTERRUPT;
+                       clr_intr |= QLAFX00_INTR_MB_CMPLT;
+                       break;
+               case QLAFX00_INTR_ASYNC_CMPLT:
+               case QLAFX00_INTR_RSP_ASYNC_CMPLT:
+                       ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+                       qlafx00_async_event(vha);
+                       clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
+                       break;
+               case QLAFX00_INTR_RSP_CMPLT:
+                       qlafx00_process_response_queue(vha, rsp);
+                       clr_intr |= QLAFX00_INTR_RSP_CMPLT;
+                       break;
+               default:
+                       ql_dbg(ql_dbg_async, vha, 0x507a,
+                           "Unrecognized interrupt type (%d).\n", stat);
+                       break;
+               }
+               QLAFX00_CLR_INTR_REG(ha, clr_intr);
+               QLAFX00_RD_INTR_REG(ha);
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+           (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+               set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+               complete(&ha->mbx_intr_comp);
+       }
+       return IRQ_HANDLED;
+}
+
+/** QLAFX00 specific IOCB implementation functions */
+
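+/*
+ * Advance the request ring and initialize the local Continuation Type 1
+ * IOCB header; returns the ring slot the caller must copy the IOCB into.
+ */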
+static inline cont_a64_entry_t *
+qlafx00_prep_cont_type1_iocb(struct req_que *req,
+                            cont_a64_entry_t *lcont_pkt)
+{
+       cont_a64_entry_t *cont_pkt;
+
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else {
+               req->ring_ptr++;
+       }
+
+       cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+       /* Load packet defaults. */
+       *((uint32_t *)(&lcont_pkt->entry_type)) =
+           __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00);
+
+       return cont_pkt;
+}
+
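+/*
+ * Fill the data segment descriptors of a Command Type 7 IOCB, spilling
+ * over into Continuation Type 1 IOCBs when more DSDs are required.
+ */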
+static inline void
+qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
+                        uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
+{
+       uint16_t        avail_dsds;
+       uint32_t        *cur_dsd;
+       scsi_qla_host_t *vha;
+       struct scsi_cmnd *cmd;
+       struct scatterlist *sg;
+       int i, cont;
+       struct req_que *req;
+       cont_a64_entry_t lcont_pkt;
+       cont_a64_entry_t *cont_pkt;
+
+       vha = sp->fcport->vha;
+       req = vha->req;
+
+       cmd = GET_CMD_SP(sp);
+       cont = 0;
+       cont_pkt = NULL;
+
+       /* Update entry type to indicate Command Type 7 IOCB */
+       *((uint32_t *)(&lcmd_pkt->entry_type)) =
+           __constant_cpu_to_le32(FX00_COMMAND_TYPE_7);
+
+       /* No data transfer */
+       if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+               lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+               return;
+       }
+
+       /* Set transfer direction */
+       if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+               lcmd_pkt->cntrl_flags =
+                   __constant_cpu_to_le16(TMF_WRITE_DATA);
+               vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+       } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+               lcmd_pkt->cntrl_flags =
+                   __constant_cpu_to_le16(TMF_READ_DATA);
+               vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+       }
+
+       /* One DSD is available in the Command Type 7 IOCB */
+       avail_dsds = 1;
+       cur_dsd = (uint32_t *)&lcmd_pkt->dseg_0_address;
+
+       /* Load data segments */
+       scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+               dma_addr_t      sle_dma;
+
+               /* Allocate additional continuation packets? */
+               if (avail_dsds == 0) {
+                       /*
+                        * Five DSDs are available in the Continuation
+                        * Type 1 IOCB.
+                        */
+                       memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
+                       cont_pkt =
+                           qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
+                       cur_dsd = (uint32_t *)lcont_pkt.dseg_0_address;
+                       avail_dsds = 5;
+                       cont = 1;
+               }
+
+               sle_dma = sg_dma_address(sg);
+               *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+               *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+               *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+               avail_dsds--;
+               if (avail_dsds == 0 && cont == 1) {
+                       cont = 0;
+                       memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+                           REQUEST_ENTRY_SIZE);
+               }
+
+       }
+       if (avail_dsds != 0 && cont == 1) {
+               memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+                   REQUEST_ENTRY_SIZE);
+       }
+}
+
+/**
+ * qlafx00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qlafx00_start_scsi(srb_t *sp)
+{
+       int             ret, nseg;
+       unsigned long   flags;
+       uint32_t        index;
+       uint32_t        handle;
+       uint16_t        cnt;
+       uint16_t        req_cnt;
+       uint16_t        tot_dsds;
+       struct req_que *req = NULL;
+       struct rsp_que *rsp = NULL;
+       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct scsi_qla_host *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct cmd_type_7_fx00 *cmd_pkt;
+       struct cmd_type_7_fx00 lcmd_pkt;
+       struct scsi_lun llun;
+       char            tag[2];
+
+       /* Setup device pointers. */
+       ret = 0;
+
+       rsp = ha->rsp_q_map[0];
+       req = vha->req;
+
+       /* So we know we haven't pci_map'ed anything yet */
+       tot_dsds = 0;
+
+       /* Markers are not required for the ISPFx00; keep the flag clear. */
+       vha->marker_needed = 0;
+
+       /* Send marker if required */
+       if (vha->marker_needed != 0) {
+               if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+                   QLA_SUCCESS)
+                       return QLA_FUNCTION_FAILED;
+               vha->marker_needed = 0;
+       }
+
+       /* Acquire ring specific lock */
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       /* Check for room in outstanding command list. */
+       handle = req->current_outstanding_cmd;
+       for (index = 1; index < req->num_outstanding_cmds; index++) {
+               handle++;
+               if (handle == req->num_outstanding_cmds)
+                       handle = 1;
+               if (!req->outstanding_cmds[handle])
+                       break;
+       }
+       if (index == req->num_outstanding_cmds)
+               goto queuing_error;
+
+       /* Map the sg table so we have an accurate count of sg entries needed */
+       if (scsi_sg_count(cmd)) {
+               nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+                   scsi_sg_count(cmd), cmd->sc_data_direction);
+               if (unlikely(!nseg))
+                       goto queuing_error;
+       } else
+               nseg = 0;
+
+       tot_dsds = nseg;
+       req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+       if (req->cnt < (req_cnt + 2)) {
+               cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+               if (req->ring_index < cnt)
+                       req->cnt = cnt - req->ring_index;
+               else
+                       req->cnt = req->length -
+                               (req->ring_index - cnt);
+               if (req->cnt < (req_cnt + 2))
+                       goto queuing_error;
+       }
+
+       /* Build command packet. */
+       req->current_outstanding_cmd = handle;
+       req->outstanding_cmds[handle] = sp;
+       sp->handle = handle;
+       cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+       req->cnt -= req_cnt;
+
+       cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
+
+       memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
+
+       lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+       lcmd_pkt.handle_hi = 0;
+       lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
+       lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
+
+       int_to_scsilun(cmd->device->lun, &llun);
+       host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
+           sizeof(lcmd_pkt.lun));
+
+       /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+       if (scsi_populate_tag_msg(cmd, tag)) {
+               switch (tag[0]) {
+               case HEAD_OF_QUEUE_TAG:
+                       lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
+                       break;
+               case ORDERED_QUEUE_TAG:
+                       lcmd_pkt.task = TSK_ORDERED;
+                       break;
+               }
+       }
+
+       /* Load SCSI command packet. */
+       host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
+       lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+       /* Build IOCB segments */
+       qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
+
+       /* Set total data segment count. */
+       lcmd_pkt.entry_count = (uint8_t)req_cnt;
+
+       /* Specify response queue number where completion should happen */
+       lcmd_pkt.entry_status = (uint8_t) rsp->id;
+
+       ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+           (uint8_t *)cmd->cmnd, cmd->cmd_len);
+       ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
+           (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+
+       memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
+       wmb();
+
+       /* Adjust ring index. */
+       req->ring_index++;
+       if (req->ring_index == req->length) {
+               req->ring_index = 0;
+               req->ring_ptr = req->ring;
+       } else
+               req->ring_ptr++;
+
+       sp->flags |= SRB_DMA_VALID;
+
+       /* Set chip new ring index. */
+       WRT_REG_DWORD(req->req_q_in, req->ring_index);
+       QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       return QLA_SUCCESS;
+
+queuing_error:
+       if (tot_dsds)
+               scsi_dma_unmap(cmd);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+       return QLA_FUNCTION_FAILED;
+}
+
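+/*
+ * Build a Task Management IOCB from the SRB and copy it into the request
+ * ring slot supplied by the caller.
+ */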
+void
+qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
+{
+       struct srb_iocb *fxio = &sp->u.iocb_cmd;
+       scsi_qla_host_t *vha = sp->fcport->vha;
+       struct req_que *req = vha->req;
+       struct tsk_mgmt_entry_fx00 tm_iocb;
+       struct scsi_lun llun;
+
+       memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
+       tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
+       tm_iocb.entry_count = 1;
+       tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+       tm_iocb.handle_hi = 0;
+       tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+       tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
+       tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
+       if (tm_iocb.control_flags == TCF_LUN_RESET) {
+               int_to_scsilun(fxio->u.tmf.lun, &llun);
+               host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
+                   sizeof(struct scsi_lun));
+       }
+
+       memcpy_toio((void __iomem *)ptm_iocb, &tm_iocb,
+           sizeof(struct tsk_mgmt_entry_fx00));
+       wmb();
+}
+
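+/*
+ * Build an Abort IOCB referencing the handle of the command to be aborted
+ * and copy it into the request ring slot supplied by the caller.
+ */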
+void
+qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
+{
+       struct srb_iocb *fxio = &sp->u.iocb_cmd;
+       scsi_qla_host_t *vha = sp->fcport->vha;
+       struct req_que *req = vha->req;
+       struct abort_iocb_entry_fx00 abt_iocb;
+
+       memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
+       abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
+       abt_iocb.entry_count = 1;
+       abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+       abt_iocb.abort_handle =
+           cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+       abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
+       abt_iocb.req_que_no = cpu_to_le16(req->id);
+
+       memcpy_toio((void __iomem *)pabt_iocb, &abt_iocb,
+           sizeof(struct abort_iocb_entry_fx00));
+       wmb();
+}
+
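+/*
+ * Build an FX00 IOCB either from a driver-internal FXIOCB_DCMD request or
+ * from a BSG pass-through job, mapping request/response buffers into data
+ * segment descriptors as required.
+ */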
+void
+qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
+{
+       struct srb_iocb *fxio = &sp->u.iocb_cmd;
+       struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+       struct fc_bsg_job *bsg_job;
+       struct fxdisc_entry_fx00 fx_iocb;
+       uint8_t entry_cnt = 1;
+
+       memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
+       fx_iocb.entry_type = FX00_IOCB_TYPE;
+       fx_iocb.handle = cpu_to_le32(sp->handle);
+       fx_iocb.entry_count = entry_cnt;
+
+       if (sp->type == SRB_FXIOCB_DCMD) {
+               fx_iocb.func_num =
+                   cpu_to_le16(sp->u.iocb_cmd.u.fxiocb.req_func_type);
+               fx_iocb.adapid = cpu_to_le32(fxio->u.fxiocb.adapter_id);
+               fx_iocb.adapid_hi = cpu_to_le32(fxio->u.fxiocb.adapter_id_hi);
+               fx_iocb.reserved_0 = cpu_to_le32(fxio->u.fxiocb.reserved_0);
+               fx_iocb.reserved_1 = cpu_to_le32(fxio->u.fxiocb.reserved_1);
+               fx_iocb.dataword_extra =
+                   cpu_to_le32(fxio->u.fxiocb.req_data_extra);
+
+               if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+                       fx_iocb.req_dsdcnt = cpu_to_le16(1);
+                       fx_iocb.req_xfrcnt =
+                           cpu_to_le16(fxio->u.fxiocb.req_len);
+                       fx_iocb.dseg_rq_address[0] =
+                           cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
+                       fx_iocb.dseg_rq_address[1] =
+                           cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
+                       fx_iocb.dseg_rq_len =
+                           cpu_to_le32(fxio->u.fxiocb.req_len);
+               }
+
+               if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+                       fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
+                       fx_iocb.rsp_xfrcnt =
+                           cpu_to_le16(fxio->u.fxiocb.rsp_len);
+                       fx_iocb.dseg_rsp_address[0] =
+                           cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
+                       fx_iocb.dseg_rsp_address[1] =
+                           cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
+                       fx_iocb.dseg_rsp_len =
+                           cpu_to_le32(fxio->u.fxiocb.rsp_len);
+               }
+
+               if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
+                       fx_iocb.dataword =
+                           cpu_to_le32(fxio->u.fxiocb.req_data);
+               }
+               fx_iocb.flags = fxio->u.fxiocb.flags;
+       } else {
+               struct scatterlist *sg;
+               bsg_job = sp->u.bsg_job;
+               piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+                       &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+               fx_iocb.func_num = piocb_rqst->func_type;
+               fx_iocb.adapid = piocb_rqst->adapid;
+               fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
+               fx_iocb.reserved_0 = piocb_rqst->reserved_0;
+               fx_iocb.reserved_1 = piocb_rqst->reserved_1;
+               fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
+               fx_iocb.dataword = piocb_rqst->dataword;
+               fx_iocb.req_xfrcnt = cpu_to_le16(piocb_rqst->req_len);
+               fx_iocb.rsp_xfrcnt = cpu_to_le16(piocb_rqst->rsp_len);
+
+               if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+                       int avail_dsds, tot_dsds;
+                       cont_a64_entry_t lcont_pkt;
+                       cont_a64_entry_t *cont_pkt = NULL;
+                       uint32_t *cur_dsd;
+                       int index = 0, cont = 0;
+
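+                       /*
+                        * The fxdisc IOCB itself carries one DSD for the
+                        * request payload; any further scatter/gather
+                        * entries are placed in Continuation Type 1 IOCBs
+                        * (five DSDs per continuation entry).
+                        */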
+                       fx_iocb.req_dsdcnt =
+                           cpu_to_le16(bsg_job->request_payload.sg_cnt);
+                       tot_dsds = bsg_job->request_payload.sg_cnt;
+                       cur_dsd = (uint32_t *)&fx_iocb.dseg_rq_address[0];
+                       avail_dsds = 1;
+                       for_each_sg(bsg_job->request_payload.sg_list, sg,
+                           tot_dsds, index) {
+                               dma_addr_t sle_dma;
+
+                               /* Allocate additional continuation packets? */
+                               if (avail_dsds == 0) {
+                                       /*
+                                        * Five DSDs are available in the Cont.
+                                        * Type 1 IOCB.
+                                        */
+                                       memset(&lcont_pkt, 0,
+                                           REQUEST_ENTRY_SIZE);
+                                       cont_pkt =
+                                           qlafx00_prep_cont_type1_iocb(
+                                               sp->fcport->vha->req,
+                                               &lcont_pkt);
+                                       cur_dsd = (uint32_t *)
+                                           lcont_pkt.dseg_0_address;
+                                       avail_dsds = 5;
+                                       cont = 1;
+                                       entry_cnt++;
+                               }
+
+                               sle_dma = sg_dma_address(sg);
+                               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+                               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+                               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+                               avail_dsds--;
+
+                               if (avail_dsds == 0 && cont == 1) {
+                                       cont = 0;
+                                       memcpy_toio(
+                                           (void __iomem *)cont_pkt,
+                                           &lcont_pkt, REQUEST_ENTRY_SIZE);
+                                       ql_dump_buffer(
+                                           ql_dbg_user + ql_dbg_verbose,
+                                           sp->fcport->vha, 0x3042,
+                                           (uint8_t *)&lcont_pkt,
+                                            REQUEST_ENTRY_SIZE);
+                               }
+                       }
+                       if (avail_dsds != 0 && cont == 1) {
+                               memcpy_toio((void __iomem *)cont_pkt,
+                                   &lcont_pkt, REQUEST_ENTRY_SIZE);
+                               ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+                                   sp->fcport->vha, 0x3043,
+                                   (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+                       }
+               }
+
+               if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+                       int avail_dsds, tot_dsds;
+                       cont_a64_entry_t lcont_pkt;
+                       cont_a64_entry_t *cont_pkt = NULL;
+                       uint32_t *cur_dsd;
+                       int index = 0, cont = 0;
+
+                       fx_iocb.rsp_dsdcnt =
+                          cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+                       tot_dsds = bsg_job->reply_payload.sg_cnt;
+                       cur_dsd = (uint32_t *)&fx_iocb.dseg_rsp_address[0];
+                       avail_dsds = 1;
+
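+                       /*
+                        * As for the request payload: one inline DSD,
+                        * the rest in Continuation Type 1 IOCBs.
+                        */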
+                       for_each_sg(bsg_job->reply_payload.sg_list, sg,
+                           tot_dsds, index) {
+                               dma_addr_t sle_dma;
+
+                               /* Allocate additional continuation packets? */
+                               if (avail_dsds == 0) {
+                                       /*
+                                        * Five DSDs are available in the Cont.
+                                        * Type 1 IOCB.
+                                       */
+                                       memset(&lcont_pkt, 0,
+                                           REQUEST_ENTRY_SIZE);
+                                       cont_pkt =
+                                           qlafx00_prep_cont_type1_iocb(
+                                               sp->fcport->vha->req,
+                                               &lcont_pkt);
+                                       cur_dsd = (uint32_t *)
+                                           lcont_pkt.dseg_0_address;
+                                       avail_dsds = 5;
+                                       cont = 1;
+                                       entry_cnt++;
+                               }
+
+                               sle_dma = sg_dma_address(sg);
+                               *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+                               *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+                               *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+                               avail_dsds--;
+
+                               if (avail_dsds == 0 && cont == 1) {
+                                       cont = 0;
+                                       memcpy_toio((void __iomem *)cont_pkt,
+                                           &lcont_pkt,
+                                           REQUEST_ENTRY_SIZE);
+                                       ql_dump_buffer(
+                                           ql_dbg_user + ql_dbg_verbose,
+                                           sp->fcport->vha, 0x3045,
+                                           (uint8_t *)&lcont_pkt,
+                                           REQUEST_ENTRY_SIZE);
+                               }
+                       }
+                       if (avail_dsds != 0 && cont == 1) {
+                               memcpy_toio((void __iomem *)cont_pkt,
+                                   &lcont_pkt, REQUEST_ENTRY_SIZE);
+                               ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+                                   sp->fcport->vha, 0x3046,
+                                   (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+                       }
+               }
+
+               if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
+                       fx_iocb.dataword = cpu_to_le32(piocb_rqst->dataword);
+               fx_iocb.flags = piocb_rqst->flags;
+               fx_iocb.entry_count = entry_cnt;
+       }
+
+       ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+           sp->fcport->vha, 0x3047,
+           (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+
+       memcpy((void __iomem *)pfxiocb, &fx_iocb,
+           sizeof(struct fxdisc_entry_fx00));
+       wmb();
+}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
new file mode 100644 (file)
index 0000000..cc327dc
--- /dev/null
@@ -0,0 +1,510 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_MR_H
+#define __QLA_MR_H
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISPF001           0xF001
+
+/* FX00 specific definitions */
+
+#define FX00_COMMAND_TYPE_7    0x07    /* Command Type 7 entry for 7XXX */
+struct cmd_type_7_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint32_t handle_hi;
+
+       uint16_t tgt_idx;               /* Target Idx. */
+       uint16_t timeout;               /* Command timeout. */
+
+       uint16_t dseg_count;            /* Data segment count. */
+       uint16_t scsi_rsp_dsd_len;
+
+       struct scsi_lun lun;            /* LUN (LE). */
+
+       uint8_t cntrl_flags;
+
+       uint8_t task_mgmt_flags;        /* Task management flags. */
+
+       uint8_t task;
+
+       uint8_t crn;
+
+       uint8_t fcp_cdb[MAX_CMDSZ];     /* SCSI command words. */
+       uint32_t byte_count;            /* Total byte count. */
+
+       uint32_t dseg_0_address[2];     /* Data segment 0 address. */
+       uint32_t dseg_0_len;            /* Data segment 0 length. */
+};
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+struct mrk_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t handle_count;           /* Handle count. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint32_t handle_hi;             /* System handle. */
+
+       uint16_t tgt_id;                /* Target ID. */
+
+       uint8_t modifier;               /* Modifier (7-0). */
+       uint8_t reserved_1;
+
+       uint8_t reserved_2[5];
+
+       uint8_t lun[8];                 /* FCP LUN (BE). */
+       uint8_t reserved_3[36];
+};
+
+
+#define        STATUS_TYPE_FX00        0x01            /* Status entry. */
+struct sts_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint32_t handle_hi;             /* System handle. */
+
+       uint16_t comp_status;           /* Completion status. */
+       uint16_t reserved_0;            /* OX_ID used by the firmware. */
+
+       uint32_t residual_len;          /* FW calc residual transfer length. */
+
+       uint16_t reserved_1;
+       uint16_t state_flags;           /* State flags. */
+
+       uint16_t reserved_2;
+       uint16_t scsi_status;           /* SCSI status. */
+
+       uint32_t sense_len;             /* FCP SENSE length. */
+       uint8_t data[32];               /* FCP response/sense information. */
+};
+
+
+#define MAX_HANDLE_COUNT       15
+#define MULTI_STATUS_TYPE_FX00 0x0D
+
+struct multi_sts_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t handle_count;
+       uint8_t entry_status;
+
+       uint32_t handles[MAX_HANDLE_COUNT];
+};
+
+#define TSK_MGMT_IOCB_TYPE_FX00                0x05
+struct tsk_mgmt_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+
+       uint32_t handle_hi;             /* System handle. */
+
+       uint16_t tgt_id;                /* Target Idx. */
+
+       uint16_t reserved_1;
+
+       uint16_t delay;                 /* Activity delay in seconds. */
+
+       uint16_t timeout;               /* Command timeout. */
+
+       struct scsi_lun lun;            /* LUN (LE). */
+
+       uint32_t control_flags;         /* Control Flags. */
+
+       uint8_t reserved_2[32];
+};
+
+
+#define        ABORT_IOCB_TYPE_FX00    0x08            /* Abort IOCB status. */
+struct abort_iocb_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint32_t handle_hi;             /* System handle. */
+
+       uint16_t tgt_id_sts;            /* Completion status. */
+       uint16_t options;
+
+       uint32_t abort_handle;          /* System handle. */
+       uint32_t abort_handle_hi;       /* System handle. */
+
+       uint16_t req_que_no;
+       uint8_t reserved_1[38];
+};
+
+#define IOCTL_IOSB_TYPE_FX00   0x0C
+struct ioctl_iocb_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint32_t reserved_0;            /* System handle. */
+
+       uint16_t comp_func_num;
+       uint16_t fw_iotcl_flags;
+
+       uint32_t dataword_r;            /* Data word returned */
+       uint32_t adapid;                /* Adapter ID */
+       uint32_t adapid_hi;             /* Adapter ID high */
+       uint32_t reserved_1;
+
+       uint32_t seq_no;
+       uint8_t reserved_2[20];
+       uint32_t residuallen;
+       uint32_t status;
+};
+
+#define STATUS_CONT_TYPE_FX00 0x04
+
+#define FX00_IOCB_TYPE         0x0B
+struct fxdisc_entry_fx00 {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System Defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint32_t reserved_0;            /* System handle. */
+
+       uint16_t func_num;
+       uint16_t req_xfrcnt;
+       uint16_t req_dsdcnt;
+       uint16_t rsp_xfrcnt;
+       uint16_t rsp_dsdcnt;
+       uint8_t flags;
+       uint8_t reserved_1;
+
+       uint32_t dseg_rq_address[2];    /* Data segment 0 address. */
+       uint32_t dseg_rq_len;           /* Data segment 0 length. */
+       uint32_t dseg_rsp_address[2];   /* Data segment 1 address. */
+       uint32_t dseg_rsp_len;          /* Data segment 1 length. */
+
+       uint32_t dataword;
+       uint32_t adapid;
+       uint32_t adapid_hi;
+       uint32_t dataword_extra;
+};
+
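+/* Target node information returned by an FXDISC_GET_TGT_NODE_INFO request. */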
+struct qlafx00_tgt_node_info {
+       uint8_t tgt_node_wwpn[WWN_SIZE];
+       uint8_t tgt_node_wwnn[WWN_SIZE];
+       uint32_t tgt_node_state;
+       uint8_t reserved[128];
+       uint32_t reserved_1[8];
+       uint64_t reserved_2[4];
+} __packed;
+
+#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)
+
+#define QLAFX00_LINK_STATUS_DOWN       0x10
+#define QLAFX00_LINK_STATUS_UP         0x11
+
+#define QLAFX00_PORT_SPEED_2G  0x2
+#define QLAFX00_PORT_SPEED_4G  0x4
+#define QLAFX00_PORT_SPEED_8G  0x8
+#define QLAFX00_PORT_SPEED_10G 0xa
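+/* Port attributes returned by an FXDISC_GET_PORT_INFO request. */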
+struct port_info_data {
+       uint8_t         port_state;
+       uint8_t         port_type;
+       uint16_t        port_identifier;
+       uint32_t        up_port_state;
+       uint8_t         fw_ver_num[32];
+       uint8_t         portal_attrib;
+       uint16_t        host_option;
+       uint8_t         reset_delay;
+       uint8_t         pdwn_retry_cnt;
+       uint16_t        max_luns2tgt;
+       uint8_t         risc_ver;
+       uint8_t         pconn_option;
+       uint16_t        risc_option;
+       uint16_t        max_frame_len;
+       uint16_t        max_iocb_alloc;
+       uint16_t        exec_throttle;
+       uint8_t         retry_cnt;
+       uint8_t         retry_delay;
+       uint8_t         port_name[8];
+       uint8_t         port_id[3];
+       uint8_t         link_status;
+       uint8_t         plink_rate;
+       uint32_t        link_config;
+       uint16_t        adap_haddr;
+       uint8_t         tgt_disc;
+       uint8_t         log_tout;
+       uint8_t         node_name[8];
+       uint16_t        erisc_opt1;
+       uint8_t         resp_acc_tmr;
+       uint8_t         intr_del_tmr;
+       uint8_t         erisc_opt2;
+       uint8_t         alt_port_name[8];
+       uint8_t         alt_node_name[8];
+       uint8_t         link_down_tout;
+       uint8_t         conn_type;
+       uint8_t         fc_fw_mode;
+       uint32_t        uiReserved[48];
+} __packed;
+
+/* OS Type Designations */
+#define OS_TYPE_UNKNOWN             0
+#define OS_TYPE_LINUX               2
+
+/* Linux Info */
+#define SYSNAME_LENGTH              128
+#define NODENAME_LENGTH             64
+#define RELEASE_LENGTH              64
+#define VERSION_LENGTH              64
+#define MACHINE_LENGTH              64
+#define DOMNAME_LENGTH              64
+
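+/* Host OS details registered with the adapter via FXDISC_REG_HOST_INFO. */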
+struct host_system_info {
+       uint32_t os_type;
+       char    sysname[SYSNAME_LENGTH];
+       char    nodename[NODENAME_LENGTH];
+       char    release[RELEASE_LENGTH];
+       char    version[VERSION_LENGTH];
+       char    machine[MACHINE_LENGTH];
+       char    domainname[DOMNAME_LENGTH];
+       char    hostdriver[VERSION_LENGTH];
+       uint32_t reserved[64];
+} __packed;
+
+struct register_host_info {
+       struct host_system_info     hsi;        /* host system info */
+       uint64_t        utc;                    /* UTC (system time) */
+       uint32_t        reserved[64];           /* future additions */
+} __packed;
+
+
+#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
+#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
+
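+/* Adapter configuration returned by an FXDISC_GET_CONFIG_INFO request. */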
+struct config_info_data {
+       uint8_t         product_name[256];
+       uint8_t         symbolic_name[64];
+       uint8_t         serial_num[32];
+       uint8_t         hw_version[16];
+       uint8_t         fw_version[16];
+       uint8_t         uboot_version[16];
+       uint8_t         fru_serial_num[32];
+
+       uint8_t         fc_port_count;
+       uint8_t         iscsi_port_count;
+       uint8_t         reserved1[2];
+
+       uint8_t         mode;
+       uint8_t         log_level;
+       uint8_t         reserved2[2];
+
+       uint32_t        log_size;
+
+       uint8_t         tgt_pres_mode;
+       uint8_t         iqn_flags;
+       uint8_t         lun_mapping;
+
+       uint64_t        adapter_id;
+
+       uint32_t        cluster_key_len;
+       uint8_t         cluster_key[10];
+
+       uint64_t        cluster_master_id;
+       uint64_t        cluster_slave_id;
+       uint8_t         cluster_flags;
+} __packed;
+
+#define FXDISC_GET_CONFIG_INFO         0x01
+#define FXDISC_GET_PORT_INFO           0x02
+#define FXDISC_GET_TGT_NODE_INFO       0x80
+#define FXDISC_GET_TGT_NODE_LIST       0x81
+#define FXDISC_REG_HOST_INFO           0x99
+
+#define QLAFX00_HBA_ICNTRL_REG         0x21B08
+#define QLAFX00_ICR_ENB_MASK            0x80000000
+#define QLAFX00_ICR_DIS_MASK            0x7fffffff
+#define QLAFX00_HST_RST_REG            0x18264
+#define QLAFX00_HST_TO_HBA_REG         0x20A04
+#define QLAFX00_HBA_TO_HOST_REG                0x21B70
+#define QLAFX00_HST_INT_STS_BITS       0x7
+#define QLAFX00_BAR1_BASE_ADDR_REG     0x40018
+#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG        0x41824
+
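+/* Interrupt status bits: mailbox, response and async event completions. */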
+#define QLAFX00_INTR_MB_CMPLT          0x1
+#define QLAFX00_INTR_RSP_CMPLT         0x2
+#define QLAFX00_INTR_MB_RSP_CMPLT      0x3
+#define QLAFX00_INTR_ASYNC_CMPLT       0x4
+#define QLAFX00_INTR_MB_ASYNC_CMPLT    0x5
+#define QLAFX00_INTR_RSP_ASYNC_CMPLT   0x6
+#define QLAFX00_INTR_ALL_CMPLT         0x7
+
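+/* Asynchronous event codes posted by the ISPFx00 firmware. */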
+#define QLAFX00_MBA_SYSTEM_ERR         0x8002
+#define QLAFX00_MBA_LINK_UP            0x8011
+#define QLAFX00_MBA_LINK_DOWN          0x8012
+#define QLAFX00_MBA_PORT_UPDATE                0x8014
+#define QLAFX00_MBA_SHUTDOWN_RQSTD     0x8062
+
+#define SOC_SW_RST_CONTROL_REG_CORE0     0x0020800
+#define SOC_FABRIC_RST_CONTROL_REG       0x0020840
+#define SOC_FABRIC_CONTROL_REG           0x0020200
+#define SOC_FABRIC_CONFIG_REG            0x0020204
+
+#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG     0x0020B00
+#define SOC_CORE_TIMER_REG                     0x0021850
+#define SOC_IRQ_ACK_REG                        0x00218b4
+
+#define CONTINUE_A64_TYPE_FX00 0x03    /* Continuation entry. */
+
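+/* Register accessors for the ISPFx00 SoC window mapped at ha->cregbase. */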
+#define QLAFX00_SET_HST_INTR(ha, value) \
+       WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+       value)
+
+#define QLAFX00_CLR_HST_INTR(ha, value) \
+       WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+       ~value)
+
+#define QLAFX00_RD_INTR_REG(ha) \
+       RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+
+#define QLAFX00_CLR_INTR_REG(ha, value) \
+       WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+       ~value)
+
+#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
+       WRT_REG_DWORD((ha)->cregbase + off, val)
+
+#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
+       RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_HBA_RST_REG(ha, val)\
+       WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+
+#define QLAFX00_RD_ICNTRL_REG(ha) \
+       RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+
+#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
+       WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+       (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
+        QLAFX00_ICR_ENB_MASK))
+
+#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
+       WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+       (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
+        QLAFX00_ICR_DIS_MASK))
+
+#define QLAFX00_RD_REG(ha, off) \
+       RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_WR_REG(ha, off, val) \
+       WRT_REG_DWORD((ha)->cregbase + off, val)
+
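+/*
+ * Pass-through IOCB request header supplied by BSG vendor commands;
+ * qlafx00_fxdisc_iocb() copies these fields into a fxdisc_entry_fx00.
+ */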
+struct qla_mt_iocb_rqst_fx00 {
+       uint32_t reserved_0;
+
+       uint16_t func_type;
+       uint8_t flags;
+       uint8_t reserved_1;
+
+       uint32_t dataword;
+
+       uint32_t adapid;
+       uint32_t adapid_hi;
+
+       uint32_t dataword_extra;
+
+       uint32_t req_len;
+
+       uint32_t rsp_len;
+};
+
+struct qla_mt_iocb_rsp_fx00 {
+       uint32_t reserved_1;
+
+       uint16_t func_type;
+       uint16_t ioctl_flags;
+
+       uint32_t ioctl_data;
+
+       uint32_t adapid;
+       uint32_t adapid_hi;
+
+       uint32_t reserved_2;
+       uint32_t seq_number;
+
+       uint8_t reserved_3[20];
+
+       int32_t res_count;
+
+       uint32_t status;
+};
+
+
+#define MAILBOX_REGISTER_COUNT_FX00    16
+#define AEN_MAILBOX_REGISTER_COUNT_FX00        8
+#define MAX_FIBRE_DEVICES_FX00 512
+#define MAX_LUNS_FX00          0x1024
+#define MAX_TARGETS_FX00       MAX_ISA_DEVICES
+#define REQUEST_ENTRY_CNT_FX00         512     /* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_FX00                256     /* Number of response entries. */
+
+/*
+ * Firmware state codes for QLAFX00 adapters
+ */
+#define FSTATE_FX00_CONFIG_WAIT     0x0000     /* Waiting for driver to issue
+                                                * Initialize FW Mbox cmd
+                                                */
+#define FSTATE_FX00_INITIALIZED     0x1000     /* FW has been initialized by
+                                                * the driver
+                                                */
+
+#define FX00_DEF_RATOV 10
+
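+/*
+ * Per-HBA ISPFx00 state: identity strings obtained via
+ * FXDISC_GET_CONFIG_INFO, a default fcport for requests not tied to a
+ * particular target, and heartbeat/reset-recovery bookkeeping used by
+ * the timer routine.
+ */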
+struct mr_data_fx00 {
+       uint8_t product_name[256];
+       uint8_t symbolic_name[64];
+       uint8_t serial_num[32];
+       uint8_t hw_version[16];
+       uint8_t fw_version[16];
+       uint8_t uboot_version[16];
+       uint8_t fru_serial_num[32];
+       fc_port_t       fcport;         /* fcport used for requests
+                                        * that are not linked
+                                        * to a particular target
+                                        */
+       uint8_t fw_hbt_en;
+       uint8_t fw_hbt_cnt;
+       uint8_t fw_hbt_miss_cnt;
+       uint32_t old_fw_hbt_cnt;
+       uint16_t fw_reset_timer_tick;
+       uint8_t fw_reset_timer_exp;
+       uint32_t old_aenmbx0_state;
+};
+
+#define QLAFX00_LOOP_DOWN_TIME         615     /* 600 */
+#define QLAFX00_HEARTBEAT_INTERVAL     6       /* number of seconds */
+#define QLAFX00_HEARTBEAT_MISS_CNT     3       /* number of misses */
+#define QLAFX00_RESET_INTERVAL         120     /* number of seconds */
+#define QLAFX00_MAX_RESET_INTERVAL     600     /* number of seconds */
+#endif
index 2c6dd3d..a083715 100644 (file)
@@ -47,6 +47,7 @@ MODULE_PARM_DESC(ql2xenableclass2,
                "Specify if Class 2 operations are supported from the very "
                "beginning. Default is 0 - class 2 not supported.");
 
+
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -354,7 +355,12 @@ fail_req_map:
 
 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 {
-       if (req && req->ring)
+       if (IS_QLAFX00(ha)) {
+               if (req && req->ring_fx00)
+                       dma_free_coherent(&ha->pdev->dev,
+                           (req->length_fx00 + 1) * sizeof(request_t),
+                           req->ring_fx00, req->dma_fx00);
+       } else if (req && req->ring)
                dma_free_coherent(&ha->pdev->dev,
                (req->length + 1) * sizeof(request_t),
                req->ring, req->dma);
@@ -368,11 +374,16 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 
 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
-       if (rsp && rsp->ring)
+       if (IS_QLAFX00(ha)) {
+               if (rsp && rsp->ring_fx00)
+                       dma_free_coherent(&ha->pdev->dev,
+                           (rsp->length_fx00 + 1) * sizeof(request_t),
+                           rsp->ring_fx00, rsp->dma_fx00);
+       } else if (rsp && rsp->ring) {
                dma_free_coherent(&ha->pdev->dev,
                (rsp->length + 1) * sizeof(response_t),
                rsp->ring, rsp->dma);
-
+       }
        kfree(rsp);
        rsp = NULL;
 }
@@ -633,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-static void
+void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -657,6 +668,9 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
        cmd->scsi_done(cmd);
 }
 
+/* If we are SP1 here, we still need to take and release the host_lock as SP1
+ * does not have the changes necessary to avoid taking host->host_lock.
+ */
 static int
 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
@@ -1304,6 +1318,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
                }
        }
 
+       if (IS_QLAFX00(ha))
+               return QLA_SUCCESS;
+
        if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1858,6 +1875,7 @@ static struct isp_operations qla2100_isp_ops = {
        .start_scsi             = qla2x00_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1895,6 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
        .start_scsi             = qla2x00_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1932,6 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
        .start_scsi             = qla24xx_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1969,6 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
        .start_scsi             = qla24xx_dif_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla81xx_isp_ops = {
@@ -2006,6 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
        .start_scsi             = qla24xx_dif_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla2x00_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla82xx_isp_ops = {
@@ -2043,6 +2065,7 @@ static struct isp_operations qla82xx_isp_ops = {
        .start_scsi             = qla82xx_start_scsi,
        .abort_isp              = qla82xx_abort_isp,
        .iospace_config         = qla82xx_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla83xx_isp_ops = {
@@ -2080,6 +2103,45 @@ static struct isp_operations qla83xx_isp_ops = {
        .start_scsi             = qla24xx_dif_start_scsi,
        .abort_isp              = qla2x00_abort_isp,
        .iospace_config         = qla83xx_iospace_config,
+       .initialize_adapter     = qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qlafx00_isp_ops = {
+       .pci_config             = qlafx00_pci_config,
+       .reset_chip             = qlafx00_soft_reset,
+       .chip_diag              = qlafx00_chip_diag,
+       .config_rings           = qlafx00_config_rings,
+       .reset_adapter          = qlafx00_soft_reset,
+       .nvram_config           = NULL,
+       .update_fw_options      = NULL,
+       .load_risc              = NULL,
+       .pci_info_str           = qlafx00_pci_info_str,
+       .fw_version_str         = qlafx00_fw_version_str,
+       .intr_handler           = qlafx00_intr_handler,
+       .enable_intrs           = qlafx00_enable_intrs,
+       .disable_intrs          = qlafx00_disable_intrs,
+       .abort_command          = qlafx00_abort_command,
+       .target_reset           = qlafx00_abort_target,
+       .lun_reset              = qlafx00_lun_reset,
+       .fabric_login           = NULL,
+       .fabric_logout          = NULL,
+       .calc_req_entries       = NULL,
+       .build_iocbs            = NULL,
+       .prep_ms_iocb           = qla24xx_prep_ms_iocb,
+       .prep_ms_fdmi_iocb      = qla24xx_prep_ms_fdmi_iocb,
+       .read_nvram             = qla24xx_read_nvram_data,
+       .write_nvram            = qla24xx_write_nvram_data,
+       .fw_dump                = NULL,
+       .beacon_on              = qla24xx_beacon_on,
+       .beacon_off             = qla24xx_beacon_off,
+       .beacon_blink           = NULL,
+       .read_optrom            = qla24xx_read_optrom_data,
+       .write_optrom           = qla24xx_write_optrom_data,
+       .get_flash_version      = qla24xx_get_flash_version,
+       .start_scsi             = qlafx00_start_scsi,
+       .abort_isp              = qlafx00_abort_isp,
+       .iospace_config         = qlafx00_iospace_config,
+       .initialize_adapter     = qlafx00_initialize_adapter,
 };
 
 static inline void
@@ -2192,6 +2254,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->device_type |= DT_T10_PI;
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
+       case PCI_DEVICE_ID_QLOGIC_ISPF001:
+               ha->device_type |= DT_ISPFX00;
+               break;
        }
 
        if (IS_QLA82XX(ha))
@@ -2265,7 +2330,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
-           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
                bars = pci_select_bars(pdev, IORESOURCE_MEM);
                mem_only = 1;
                ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2436,6 +2502,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
                ha->nvram_conf_off = ~0;
                ha->nvram_data_off = ~0;
+       } else if (IS_QLAFX00(ha)) {
+               ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
+               ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
+               ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
+               req_length = REQUEST_ENTRY_CNT_FX00;
+               rsp_length = RESPONSE_ENTRY_CNT_FX00;
+               ha->init_cb_size = sizeof(struct init_cb_fx);
+               ha->isp_ops = &qlafx00_isp_ops;
+               ha->port_down_retry_count = 30; /* default value */
+               ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+               ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+               ha->mr.fw_hbt_en = 1;
        }
 
        ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2500,13 +2578,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        host = base_vha->host;
        base_vha->req = req;
-       host->can_queue = req->length + 128;
+       if (IS_QLAFX00(ha))
+               host->can_queue = 1024;
+       else
+               host->can_queue = req->length + 128;
        if (IS_QLA2XXX_MIDTYPE(ha))
                base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
        else
                base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
                                                base_vha->vp_idx;
 
+       /* Setup fcport template structure. */
+       ha->mr.fcport.vha = base_vha;
+       ha->mr.fcport.port_type = FCT_UNKNOWN;
+       ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
+       qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
+       ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
+       ha->mr.fcport.scan_state = 1;
+
        /* Set the SG table size based on ISP type */
        if (!IS_FWI2_CAPABLE(ha)) {
                if (IS_QLA2100(ha))
@@ -2562,6 +2651,13 @@ que_init:
        rsp->req = req;
        req->rsp = rsp;
 
+       if (IS_QLAFX00(ha)) {
+               ha->rsp_q_map[0] = rsp;
+               ha->req_q_map[0] = req;
+               set_bit(0, ha->req_qid_map);
+               set_bit(0, ha->rsp_qid_map);
+       }
+
        /* FWI2-capable only. */
        req->req_q_in = &ha->iobase->isp24.req_q_in;
        req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2574,6 +2670,13 @@ que_init:
                rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
        }
 
+       if (IS_QLAFX00(ha)) {
+               req->req_q_in = &ha->iobase->ispfx00.req_q_in;
+               req->req_q_out = &ha->iobase->ispfx00.req_q_out;
+               rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
+               rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
+       }
+
        if (IS_QLA82XX(ha)) {
                req->req_q_out = &ha->iobase->isp82.req_q_out[0];
                rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
@@ -2595,7 +2698,7 @@ que_init:
            "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
-       if (qla2x00_initialize_adapter(base_vha)) {
+       if (ha->isp_ops->initialize_adapter(base_vha)) {
                ql_log(ql_log_fatal, base_vha, 0x00d6,
                    "Failed to initialize adapter - Adapter flags %x.\n",
                    base_vha->device_flags);
@@ -2720,6 +2823,18 @@ skip_dpc:
 
        qla2x00_alloc_sysfs_attr(base_vha);
 
+       if (IS_QLAFX00(ha)) {
+               ret = qlafx00_fx_disc(base_vha,
+                       &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+
+               ret = qlafx00_fx_disc(base_vha,
+                       &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
+
+               /* Register system information */
+               ret = qlafx00_fx_disc(base_vha,
+                       &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
+       }
+
        qla2x00_init_host_attr(base_vha);
 
        qla2x00_dfs_setup(base_vha);
@@ -2777,6 +2892,8 @@ iospace_config_failed:
        } else {
                if (ha->iobase)
                        iounmap(ha->iobase);
+               if (ha->cregbase)
+                       iounmap(ha->cregbase);
        }
        pci_release_selected_regions(ha->pdev, ha->bars);
        kfree(ha);
@@ -2960,6 +3077,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
                if (ha->iobase)
                        iounmap(ha->iobase);
 
+               if (ha->cregbase)
+                       iounmap(ha->cregbase);
+
                if (ha->mqiobase)
                        iounmap(ha->mqiobase);
 
@@ -3068,6 +3188,12 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
     int do_login, int defer)
 {
+       if (IS_QLAFX00(vha->hw)) {
+               qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+               qla2x00_schedule_rport_del(vha, fcport, defer);
+               return;
+       }
+
        if (atomic_read(&fcport->state) == FCS_ONLINE &&
            vha->vp_idx == fcport->vha->vp_idx) {
                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
@@ -3710,6 +3836,22 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
        kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
 }
 
+int
+qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
+                       uint32_t *data, int cnt)
+{
+       struct qla_work_evt *e;
+
+       e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
+       if (!e)
+               return QLA_FUNCTION_FAILED;
+
+       e->u.aenfx.evtcode = evtcode;
+       e->u.aenfx.count = cnt;
+       memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
+       return qla2x00_post_work(vha, e);
+}
+
 void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
@@ -3758,6 +3900,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                case QLA_EVT_UEVENT:
                        qla2x00_uevent_emit(vha, e->u.uevent.code);
                        break;
+               case QLA_EVT_AENFX:
+                       qlafx00_process_aen(vha, e);
+                       break;
                }
                if (e->flags & QLA_EVT_FLAG_FREE)
                        kfree(e);
@@ -4592,6 +4737,38 @@ qla2x00_do_dpc(void *data)
                                ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
                                    "FCoE context reset end.\n");
                        }
+               } else if (IS_QLAFX00(ha)) {
+                       if (test_and_clear_bit(ISP_UNRECOVERABLE,
+                               &base_vha->dpc_flags)) {
+                               ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
+                                   "Firmware Reset Recovery\n");
+                               if (qlafx00_reset_initialize(base_vha)) {
+                                       /* Failed. Abort isp later. */
+                                       if (!test_bit(UNLOADING,
+                                           &base_vha->dpc_flags))
+                                               set_bit(ISP_UNRECOVERABLE,
+                                                   &base_vha->dpc_flags);
+                                       ql_dbg(ql_dbg_dpc, base_vha, 0x4021,
+                                           "Reset Recovery Failed\n");
+                               }
+                       }
+
+                       if (test_and_clear_bit(FX00_TARGET_SCAN,
+                               &base_vha->dpc_flags)) {
+                               ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
+                                   "ISPFx00 Target Scan scheduled\n");
+                               if (qlafx00_rescan_isp(base_vha)) {
+                                       if (!test_bit(UNLOADING,
+                                           &base_vha->dpc_flags))
+                                               set_bit(ISP_UNRECOVERABLE,
+                                                   &base_vha->dpc_flags);
+                                       ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
+                                           "ISPFx00 Target Scan Failed\n");
+                               }
+                               ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
+                                   "ISPFx00 Target Scan End\n");
+                       }
                }
 
                if (test_and_clear_bit(ISP_ABORT_NEEDED,
@@ -4630,6 +4807,9 @@ qla2x00_do_dpc(void *data)
                        clear_bit(SCR_PENDING, &base_vha->dpc_flags);
                }
 
+               if (IS_QLAFX00(ha))
+                       goto loop_resync_check;
+
                if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
                            "Quiescence mode scheduled.\n");
@@ -4654,7 +4834,7 @@ qla2x00_do_dpc(void *data)
                }
 
                if (test_and_clear_bit(RESET_MARKER_NEEDED,
-                                                       &base_vha->dpc_flags) &&
+                               &base_vha->dpc_flags) &&
                    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
 
                        ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
@@ -4677,9 +4857,9 @@ qla2x00_do_dpc(void *data)
                        ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
                            "Relogin end.\n");
                }
-
+loop_resync_check:
                if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
-                                                       &base_vha->dpc_flags)) {
+                   &base_vha->dpc_flags)) {
 
                        ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
                            "Loop resync scheduled.\n");
@@ -4697,6 +4877,9 @@ qla2x00_do_dpc(void *data)
                            "Loop resync end.\n");
                }
 
+               if (IS_QLAFX00(ha))
+                       goto intr_on_check;
+
                if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
                    atomic_read(&base_vha->loop_state) == LOOP_READY) {
                        clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
@@ -4714,7 +4897,7 @@ qla2x00_do_dpc(void *data)
                if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
                    &base_vha->dpc_flags))
                        qla2x00_host_ramp_up_queuedepth(base_vha);
-
+intr_on_check:
                if (!ha->interrupts_on)
                        ha->isp_ops->enable_intrs(ha);
 
@@ -4722,7 +4905,8 @@ qla2x00_do_dpc(void *data)
                                        &base_vha->dpc_flags))
                        ha->isp_ops->beacon_blink(base_vha);
 
-               qla2x00_do_dpc_all_vps(base_vha);
+               if (!IS_QLAFX00(ha))
+                       qla2x00_do_dpc_all_vps(base_vha);
 
                ha->dpc_active = 0;
 end_loop:
@@ -4818,6 +5002,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
                qla82xx_watchdog(vha);
        }
 
+       if (!vha->vp_idx && IS_QLAFX00(ha))
+               qlafx00_timer_routine(vha);
+
        /* Loop down handler. */
        if (atomic_read(&vha->loop_down_timer) > 0 &&
            !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
@@ -5335,6 +5522,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);