[SCSI] lpfc 8.3.22: Add new mailbox command and new BSG fix
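
Each queue-create path changed below follows the same pattern: the SLI4 config sub-header is resolved before the mailbox is built, the queue-create version reported by the adapter (pc_sli4_params.cqv/mqv/wqv/rqv) is stamped into the request header, and version-specific context fields are then selected; the old shdr assignments after lpfc_sli_issue_mbox() become redundant and are dropped. A condensed, illustrative sketch of that pattern, drawn from the CQ-create hunk (not a literal copy of the driver code):

	/*
	 * Sketch of the version handling added below (CQ create shown;
	 * the MQ, WQ and RQ create paths follow the same shape).  The
	 * sub-header is resolved before the mailbox is issued so the
	 * queue-create version can be written into the request, not
	 * just read back from the response.
	 */
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* version 2: explicit page size plus its own EQ id field */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		/* older versions: original EQ id field */
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
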
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2ee0374..84234a4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -10403,7 +10403,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
 
-
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
@@ -10413,11 +10412,22 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
                         LPFC_MBOX_OPCODE_CQ_CREATE,
                         length, LPFC_SLI4_MBX_EMBED);
        cq_create = &mbox->u.mqe.un.cq_create;
+       shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
        bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
                    cq->page_count);
        bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
        bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
-       bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+       bf_set(lpfc_mbox_hdr_version, &shdr->request,
+              phba->sli4_hba.pc_sli4_params.cqv);
+       if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
+               bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+                      (PAGE_SIZE/SLI4_PAGE_SIZE));
+               bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
+                      eq->queue_id);
+       } else {
+               bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
+                      eq->queue_id);
+       }
        switch (cq->entry_count) {
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10449,7 +10459,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 
        /* The IOCTL status is embedded in the mailbox subheader. */
-       shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
@@ -10515,20 +10524,20 @@ lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
        bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
        switch (mq->entry_count) {
        case 16:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-                      LPFC_MQ_CNT_16);
+               bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+                      LPFC_MQ_RING_SIZE_16);
                break;
        case 32:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-                      LPFC_MQ_CNT_32);
+               bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+                      LPFC_MQ_RING_SIZE_32);
                break;
        case 64:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-                      LPFC_MQ_CNT_64);
+               bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+                      LPFC_MQ_RING_SIZE_64);
                break;
        case 128:
-               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
-                      LPFC_MQ_CNT_128);
+               bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
+                      LPFC_MQ_RING_SIZE_128);
                break;
        }
        list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10586,6 +10595,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
                         length, LPFC_SLI4_MBX_EMBED);
 
        mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
+       shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
        bf_set(lpfc_mbx_mq_create_ext_num_pages,
               &mq_create_ext->u.request, mq->page_count);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
@@ -10598,9 +10608,15 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
               &mq_create_ext->u.request, 1);
-       bf_set(lpfc_mq_context_cq_id,
-              &mq_create_ext->u.request.context, cq->queue_id);
        bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
+       bf_set(lpfc_mbox_hdr_version, &shdr->request,
+              phba->sli4_hba.pc_sli4_params.mqv);
+       if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
+               bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
+                      cq->queue_id);
+       else
+               bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
+                      cq->queue_id);
        switch (mq->entry_count) {
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -10610,20 +10626,24 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
                        return -EINVAL;
                /* otherwise default to smallest count (drop through) */
        case 16:
-               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-                      LPFC_MQ_CNT_16);
+               bf_set(lpfc_mq_context_ring_size,
+                      &mq_create_ext->u.request.context,
+                      LPFC_MQ_RING_SIZE_16);
                break;
        case 32:
-               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-                      LPFC_MQ_CNT_32);
+               bf_set(lpfc_mq_context_ring_size,
+                      &mq_create_ext->u.request.context,
+                      LPFC_MQ_RING_SIZE_32);
                break;
        case 64:
-               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-                      LPFC_MQ_CNT_64);
+               bf_set(lpfc_mq_context_ring_size,
+                      &mq_create_ext->u.request.context,
+                      LPFC_MQ_RING_SIZE_64);
                break;
        case 128:
-               bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context,
-                      LPFC_MQ_CNT_128);
+               bf_set(lpfc_mq_context_ring_size,
+                      &mq_create_ext->u.request.context,
+                      LPFC_MQ_RING_SIZE_128);
                break;
        }
        list_for_each_entry(dmabuf, &mq->page_list, list) {
@@ -10634,7 +10654,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
                                        putPaddrHigh(dmabuf->phys);
        }
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-       shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
        mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
                              &mq_create_ext->u.response);
        if (rc != MBX_SUCCESS) {
@@ -10711,6 +10730,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+       struct dma_address *page;
 
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;
@@ -10724,20 +10744,42 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                         LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
                         length, LPFC_SLI4_MBX_EMBED);
        wq_create = &mbox->u.mqe.un.wq_create;
+       shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
        bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
                    wq->page_count);
        bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
                    cq->queue_id);
+       bf_set(lpfc_mbox_hdr_version, &shdr->request,
+              phba->sli4_hba.pc_sli4_params.wqv);
+       if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+               bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+                      wq->entry_count);
+               switch (wq->entry_size) {
+               default:
+               case 64:
+                       bf_set(lpfc_mbx_wq_create_wqe_size,
+                              &wq_create->u.request_1,
+                              LPFC_WQ_WQE_SIZE_64);
+                       break;
+               case 128:
+                       bf_set(lpfc_mbx_wq_create_wqe_size,
+                              &wq_create->u.request_1,
+                              LPFC_WQ_WQE_SIZE_128);
+                       break;
+               }
+               bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+                      (PAGE_SIZE/SLI4_PAGE_SIZE));
+               page = wq_create->u.request_1.page;
+       } else {
+               page = wq_create->u.request.page;
+       }
        list_for_each_entry(dmabuf, &wq->page_list, list) {
                memset(dmabuf->virt, 0, hw_page_size);
-               wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
-                                       putPaddrLow(dmabuf->phys);
-               wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
-                                       putPaddrHigh(dmabuf->phys);
+               page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
+               page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
        }
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        /* The IOCTL status is embedded in the mailbox subheader. */
-       shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
@@ -10815,37 +10857,51 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                         LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
                         length, LPFC_SLI4_MBX_EMBED);
        rq_create = &mbox->u.mqe.un.rq_create;
-       switch (hrq->entry_count) {
-       default:
-               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "2535 Unsupported RQ count. (%d)\n",
-                               hrq->entry_count);
-               if (hrq->entry_count < 512)
-                       return -EINVAL;
-               /* otherwise default to smallest count (drop through) */
-       case 512:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_512);
-               break;
-       case 1024:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_1024);
-               break;
-       case 2048:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_2048);
-               break;
-       case 4096:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_4096);
-               break;
+       shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+       bf_set(lpfc_mbox_hdr_version, &shdr->request,
+              phba->sli4_hba.pc_sli4_params.rqv);
+       if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+               bf_set(lpfc_rq_context_rqe_count_1,
+                      &rq_create->u.request.context,
+                      hrq->entry_count);
+               rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
+       } else {
+               switch (hrq->entry_count) {
+               default:
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2535 Unsupported RQ count. (%d)\n",
+                                       hrq->entry_count);
+                       if (hrq->entry_count < 512)
+                               return -EINVAL;
+                       /* otherwise default to smallest count (drop through) */
+               case 512:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_512);
+                       break;
+               case 1024:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_1024);
+                       break;
+               case 2048:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_2048);
+                       break;
+               case 4096:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_4096);
+                       break;
+               }
+               bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+                      LPFC_HDR_BUF_SIZE);
        }
        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
               cq->queue_id);
        bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
               hrq->page_count);
-       bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-              LPFC_HDR_BUF_SIZE);
        list_for_each_entry(dmabuf, &hrq->page_list, list) {
                memset(dmabuf->virt, 0, hw_page_size);
                rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
@@ -10855,7 +10911,6 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        }
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        /* The IOCTL status is embedded in the mailbox subheader. */
-       shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
@@ -10881,37 +10936,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
                         LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
                         length, LPFC_SLI4_MBX_EMBED);
-       switch (drq->entry_count) {
-       default:
-               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "2536 Unsupported RQ count. (%d)\n",
-                               drq->entry_count);
-               if (drq->entry_count < 512)
-                       return -EINVAL;
-               /* otherwise default to smallest count (drop through) */
-       case 512:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_512);
-               break;
-       case 1024:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_1024);
-               break;
-       case 2048:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_2048);
-               break;
-       case 4096:
-               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
-                      LPFC_RQ_RING_SIZE_4096);
-               break;
+       bf_set(lpfc_mbox_hdr_version, &shdr->request,
+              phba->sli4_hba.pc_sli4_params.rqv);
+       if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
+               bf_set(lpfc_rq_context_rqe_count_1,
+                      &rq_create->u.request.context,
+                      hrq->entry_count);
+               rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
+       } else {
+               switch (drq->entry_count) {
+               default:
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2536 Unsupported RQ count. (%d)\n",
+                                       drq->entry_count);
+                       if (drq->entry_count < 512)
+                               return -EINVAL;
+                       /* otherwise default to smallest count (drop through) */
+               case 512:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_512);
+                       break;
+               case 1024:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_1024);
+                       break;
+               case 2048:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_2048);
+                       break;
+               case 4096:
+                       bf_set(lpfc_rq_context_rqe_count,
+                              &rq_create->u.request.context,
+                              LPFC_RQ_RING_SIZE_4096);
+                       break;
+               }
+               bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+                      LPFC_DATA_BUF_SIZE);
        }
        bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
               cq->queue_id);
        bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
               drq->page_count);
-       bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
-              LPFC_DATA_BUF_SIZE);
        list_for_each_entry(dmabuf, &drq->page_list, list) {
                rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
                                        putPaddrLow(dmabuf->phys);