Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 28 May 2016 19:04:17 +0000 (12:04 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 28 May 2016 19:04:17 +0000 (12:04 -0700)
Pull SCSI target updates from Nicholas Bellinger:
 "Here are the outstanding target pending updates for v4.7-rc1.

  The highlights this round include:

   - Allow external PR/ALUA metadata path be defined at runtime via top
     level configfs attribute (Lee)
   - Fix target session shutdown bug for ib_srpt multi-channel (hch)
   - Make TFO close_session() and shutdown_session() optional (hch)
   - Drop se_sess->sess_kref + convert tcm_qla2xxx to internal kref
     (hch)
   - Add tcm_qla2xxx endpoint attribute for basic FC jammer (Laurence)
   - Refactor iscsi-target RX/TX PDU encode/decode into common code
     (Varun)
   - Extend iscsit_transport with xmit_pdu, release_cmd, get_rx_pdu,
     validate_parameters, and get_r2t_ttt for generic ISO offload
     (Varun)
   - Initial merge of cxgb iscsi-segment offload target driver (Varun)

  The bulk of the changes are Chelsio's new driver, along with a number
  of iscsi-target common code improvements made by Varun + Co along the
  way"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (29 commits)
  iscsi-target: Fix early sk_data_ready LOGIN_FLAGS_READY race
  cxgbit: Use type ISCSI_CXGBIT + cxgbit tpg_np attribute
  iscsi-target: Convert transport drivers to signal rdma_shutdown
  iscsi-target: Make iscsi_tpg_np driver show/store use generic code
  tcm_qla2xxx Add SCSI command jammer/discard capability
  iscsi-target: graceful disconnect on invalid mapping to iovec
  target: need_to_release is always false, remove redundant check and kfree
  target: remove sess_kref and ->shutdown_session
  iscsi-target: remove usage of ->shutdown_session
  tcm_qla2xxx: introduce a private sess_kref
  target: make close_session optional
  target: make ->shutdown_session optional
  target: remove acl_stop
  target: consolidate and fix session shutdown
  cxgbit: add files for cxgbit.ko
  iscsi-target: export symbols
  iscsi-target: call complete on conn_logout_comp
  iscsi-target: clear tx_thread_active
  iscsi-target: add new offload transport type
  iscsi-target: use conn_transport->transport_type in text rsp
  ...

1  2 
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_transport.c
include/target/target_core_fabric.h

@@@ -33,8 -33,7 +33,8 @@@
  
  #define       ISERT_MAX_CONN          8
  #define ISER_MAX_RX_CQ_LEN    (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
 -#define ISER_MAX_TX_CQ_LEN    (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
 +#define ISER_MAX_TX_CQ_LEN \
 +      ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
  #define ISER_MAX_CQ_LEN               (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
                                 ISERT_MAX_CONN)
  
@@@ -47,6 -46,14 +47,6 @@@ static LIST_HEAD(device_list)
  static struct workqueue_struct *isert_comp_wq;
  static struct workqueue_struct *isert_release_wq;
  
 -static void
 -isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 -static int
 -isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
 -static void
 -isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 -static int
 -isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
  static int
  isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
  static int
@@@ -135,7 -142,6 +135,7 @@@ isert_create_qp(struct isert_conn *iser
        attr.recv_cq = comp->cq;
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
 +      attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
        attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
        isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
                                  device->ib_device->attrs.max_sge_rd);
@@@ -264,9 -270,9 +264,9 @@@ isert_alloc_comps(struct isert_device *
                                 device->ib_device->num_comp_vectors));
  
        isert_info("Using %d CQs, %s supports %d vectors support "
 -                 "Fast registration %d pi_capable %d\n",
 +                 "pi_capable %d\n",
                   device->comps_used, device->ib_device->name,
 -                 device->ib_device->num_comp_vectors, device->use_fastreg,
 +                 device->ib_device->num_comp_vectors,
                   device->pi_capable);
  
        device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
@@@ -307,6 -313,18 +307,6 @@@ isert_create_device_ib_res(struct isert
        isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
        isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
  
 -      /* asign function handlers */
 -      if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
 -          ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
 -              device->use_fastreg = 1;
 -              device->reg_rdma_mem = isert_reg_rdma;
 -              device->unreg_rdma_mem = isert_unreg_rdma;
 -      } else {
 -              device->use_fastreg = 0;
 -              device->reg_rdma_mem = isert_map_rdma;
 -              device->unreg_rdma_mem = isert_unmap_cmd;
 -      }
 -
        ret = isert_alloc_comps(device);
        if (ret)
                goto out;
@@@ -398,6 -416,146 +398,6 @@@ isert_device_get(struct rdma_cm_id *cma
        return device;
  }
  
 -static void
 -isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 -{
 -      struct fast_reg_descriptor *fr_desc, *tmp;
 -      int i = 0;
 -
 -      if (list_empty(&isert_conn->fr_pool))
 -              return;
 -
 -      isert_info("Freeing conn %p fastreg pool", isert_conn);
 -
 -      list_for_each_entry_safe(fr_desc, tmp,
 -                               &isert_conn->fr_pool, list) {
 -              list_del(&fr_desc->list);
 -              ib_dereg_mr(fr_desc->data_mr);
 -              if (fr_desc->pi_ctx) {
 -                      ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
 -                      ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
 -                      kfree(fr_desc->pi_ctx);
 -              }
 -              kfree(fr_desc);
 -              ++i;
 -      }
 -
 -      if (i < isert_conn->fr_pool_size)
 -              isert_warn("Pool still has %d regions registered\n",
 -                      isert_conn->fr_pool_size - i);
 -}
 -
 -static int
 -isert_create_pi_ctx(struct fast_reg_descriptor *desc,
 -                  struct ib_device *device,
 -                  struct ib_pd *pd)
 -{
 -      struct pi_context *pi_ctx;
 -      int ret;
 -
 -      pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
 -      if (!pi_ctx) {
 -              isert_err("Failed to allocate pi context\n");
 -              return -ENOMEM;
 -      }
 -
 -      pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
 -                                    ISCSI_ISER_SG_TABLESIZE);
 -      if (IS_ERR(pi_ctx->prot_mr)) {
 -              isert_err("Failed to allocate prot frmr err=%ld\n",
 -                        PTR_ERR(pi_ctx->prot_mr));
 -              ret = PTR_ERR(pi_ctx->prot_mr);
 -              goto err_pi_ctx;
 -      }
 -      desc->ind |= ISERT_PROT_KEY_VALID;
 -
 -      pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
 -      if (IS_ERR(pi_ctx->sig_mr)) {
 -              isert_err("Failed to allocate signature enabled mr err=%ld\n",
 -                        PTR_ERR(pi_ctx->sig_mr));
 -              ret = PTR_ERR(pi_ctx->sig_mr);
 -              goto err_prot_mr;
 -      }
 -
 -      desc->pi_ctx = pi_ctx;
 -      desc->ind |= ISERT_SIG_KEY_VALID;
 -      desc->ind &= ~ISERT_PROTECTED;
 -
 -      return 0;
 -
 -err_prot_mr:
 -      ib_dereg_mr(pi_ctx->prot_mr);
 -err_pi_ctx:
 -      kfree(pi_ctx);
 -
 -      return ret;
 -}
 -
 -static int
 -isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
 -                   struct fast_reg_descriptor *fr_desc)
 -{
 -      fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
 -                                     ISCSI_ISER_SG_TABLESIZE);
 -      if (IS_ERR(fr_desc->data_mr)) {
 -              isert_err("Failed to allocate data frmr err=%ld\n",
 -                        PTR_ERR(fr_desc->data_mr));
 -              return PTR_ERR(fr_desc->data_mr);
 -      }
 -      fr_desc->ind |= ISERT_DATA_KEY_VALID;
 -
 -      isert_dbg("Created fr_desc %p\n", fr_desc);
 -
 -      return 0;
 -}
 -
 -static int
 -isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 -{
 -      struct fast_reg_descriptor *fr_desc;
 -      struct isert_device *device = isert_conn->device;
 -      struct se_session *se_sess = isert_conn->conn->sess->se_sess;
 -      struct se_node_acl *se_nacl = se_sess->se_node_acl;
 -      int i, ret, tag_num;
 -      /*
 -       * Setup the number of FRMRs based upon the number of tags
 -       * available to session in iscsi_target_locate_portal().
 -       */
 -      tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
 -      tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 -
 -      isert_conn->fr_pool_size = 0;
 -      for (i = 0; i < tag_num; i++) {
 -              fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 -              if (!fr_desc) {
 -                      isert_err("Failed to allocate fast_reg descriptor\n");
 -                      ret = -ENOMEM;
 -                      goto err;
 -              }
 -
 -              ret = isert_create_fr_desc(device->ib_device,
 -                                         device->pd, fr_desc);
 -              if (ret) {
 -                      isert_err("Failed to create fastreg descriptor err=%d\n",
 -                             ret);
 -                      kfree(fr_desc);
 -                      goto err;
 -              }
 -
 -              list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
 -              isert_conn->fr_pool_size++;
 -      }
 -
 -      isert_dbg("Creating conn %p fastreg pool size=%d",
 -               isert_conn, isert_conn->fr_pool_size);
 -
 -      return 0;
 -
 -err:
 -      isert_conn_free_fastreg_pool(isert_conn);
 -      return ret;
 -}
 -
  static void
  isert_init_conn(struct isert_conn *isert_conn)
  {
        init_completion(&isert_conn->login_req_comp);
        kref_init(&isert_conn->kref);
        mutex_init(&isert_conn->mutex);
 -      spin_lock_init(&isert_conn->pool_lock);
 -      INIT_LIST_HEAD(&isert_conn->fr_pool);
        INIT_WORK(&isert_conn->release_work, isert_release_work);
  }
  
@@@ -579,6 -739,9 +579,6 @@@ isert_connect_release(struct isert_con
  
        BUG_ON(!device);
  
 -      if (device->use_fastreg)
 -              isert_conn_free_fastreg_pool(isert_conn);
 -
        isert_free_rx_descriptors(isert_conn);
        if (isert_conn->cm_id)
                rdma_destroy_id(isert_conn->cm_id);
@@@ -917,6 -1080,7 +917,6 @@@ isert_init_send_wr(struct isert_conn *i
  {
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
  
 -      isert_cmd->iser_ib_op = ISER_IB_SEND;
        tx_desc->tx_cqe.done = isert_send_done;
        send_wr->wr_cqe = &tx_desc->tx_cqe;
  
@@@ -996,6 -1160,16 +996,6 @@@ isert_put_login_tx(struct iscsi_conn *c
        }
        if (!login->login_failed) {
                if (login->login_complete) {
 -                      if (!conn->sess->sess_ops->SessionType &&
 -                          isert_conn->device->use_fastreg) {
 -                              ret = isert_conn_create_fastreg_pool(isert_conn);
 -                              if (ret) {
 -                                      isert_err("Conn: %p failed to create"
 -                                             " fastreg pool\n", isert_conn);
 -                                      return ret;
 -                              }
 -                      }
 -
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;
@@@ -1459,26 -1633,97 +1459,26 @@@ isert_login_recv_done(struct ib_cq *cq
                                ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
  }
  
 -static int
 -isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 -                 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
 -                 enum iser_ib_op_code op, struct isert_data_buf *data)
 -{
 -      struct ib_device *ib_dev = isert_conn->cm_id->device;
 -
 -      data->dma_dir = op == ISER_IB_RDMA_WRITE ?
 -                            DMA_TO_DEVICE : DMA_FROM_DEVICE;
 -
 -      data->len = length - offset;
 -      data->offset = offset;
 -      data->sg_off = data->offset / PAGE_SIZE;
 -
 -      data->sg = &sg[data->sg_off];
 -      data->nents = min_t(unsigned int, nents - data->sg_off,
 -                                        ISCSI_ISER_SG_TABLESIZE);
 -      data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
 -                                      PAGE_SIZE);
 -
 -      data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
 -                                      data->dma_dir);
 -      if (unlikely(!data->dma_nents)) {
 -              isert_err("Cmd: unable to dma map SGs %p\n", sg);
 -              return -EINVAL;
 -      }
 -
 -      isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
 -                isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
 -
 -      return 0;
 -}
 -
 -static void
 -isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
 -{
 -      struct ib_device *ib_dev = isert_conn->cm_id->device;
 -
 -      ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
 -      memset(data, 0, sizeof(*data));
 -}
 -
 -
 -
  static void
 -isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 +isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
  {
 -      isert_dbg("Cmd %p\n", isert_cmd);
 -
 -      if (isert_cmd->data.sg) {
 -              isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
 -              isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 -      }
 +      struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
 +      enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
  
 -      if (isert_cmd->rdma_wr) {
 -              isert_dbg("Cmd %p free send_wr\n", isert_cmd);
 -              kfree(isert_cmd->rdma_wr);
 -              isert_cmd->rdma_wr = NULL;
 -      }
 -
 -      if (isert_cmd->ib_sge) {
 -              isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
 -              kfree(isert_cmd->ib_sge);
 -              isert_cmd->ib_sge = NULL;
 -      }
 -}
 -
 -static void
 -isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 -{
 -      isert_dbg("Cmd %p\n", isert_cmd);
 -
 -      if (isert_cmd->fr_desc) {
 -              isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
 -              if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
 -                      isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
 -                      isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 -              }
 -              spin_lock_bh(&isert_conn->pool_lock);
 -              list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
 -              spin_unlock_bh(&isert_conn->pool_lock);
 -              isert_cmd->fr_desc = NULL;
 -      }
 +      if (!cmd->rw.nr_ops)
 +              return;
  
 -      if (isert_cmd->data.sg) {
 -              isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
 -              isert_unmap_data_buf(isert_conn, &isert_cmd->data);
 +      if (isert_prot_cmd(conn, se_cmd)) {
 +              rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
 +                              conn->cm_id->port_num, se_cmd->t_data_sg,
 +                              se_cmd->t_data_nents, se_cmd->t_prot_sg,
 +                              se_cmd->t_prot_nents, dir);
 +      } else {
 +              rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
 +                              se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
        }
  
 -      isert_cmd->ib_sge = NULL;
 -      isert_cmd->rdma_wr = NULL;
 +      cmd->rw.nr_ops = 0;
  }
  
  static void
@@@ -1487,6 -1732,7 +1487,6 @@@ isert_put_cmd(struct isert_cmd *isert_c
        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsi_conn *conn = isert_conn->conn;
 -      struct isert_device *device = isert_conn->device;
        struct iscsi_text_rsp *hdr;
  
        isert_dbg("Cmd %p\n", isert_cmd);
                        }
                }
  
 -              device->unreg_rdma_mem(isert_cmd, isert_conn);
 +              isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
@@@ -1648,9 -1894,14 +1648,9 @@@ isert_rdma_write_done(struct ib_cq *cq
  
        isert_dbg("Cmd %p\n", isert_cmd);
  
 -      if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
 -              ret = isert_check_pi_status(cmd,
 -                              isert_cmd->fr_desc->pi_ctx->sig_mr);
 -              isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 -      }
 +      ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
 +      isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
  
 -      device->unreg_rdma_mem(isert_cmd, isert_conn);
 -      isert_cmd->rdma_wr_num = 0;
        if (ret)
                transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
        else
@@@ -1678,12 -1929,16 +1678,12 @@@ isert_rdma_read_done(struct ib_cq *cq, 
  
        isert_dbg("Cmd %p\n", isert_cmd);
  
 -      if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
 -              ret = isert_check_pi_status(se_cmd,
 -                                          isert_cmd->fr_desc->pi_ctx->sig_mr);
 -              isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
 -      }
 -
        iscsit_stop_dataout_timer(cmd);
 -      device->unreg_rdma_mem(isert_cmd, isert_conn);
 -      cmd->write_data_done = isert_cmd->data.len;
 -      isert_cmd->rdma_wr_num = 0;
 +
 +      if (isert_prot_cmd(isert_conn, se_cmd))
 +              ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
 +      isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 +      cmd->write_data_done = 0;
  
        isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
        spin_lock_bh(&cmd->istate_lock);
@@@ -1856,6 -2111,7 +1856,6 @@@ isert_aborted_task(struct iscsi_conn *c
  {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
 -      struct isert_device *device = isert_conn->device;
  
        spin_lock_bh(&conn->cmd_lock);
        if (!list_empty(&cmd->i_conn_node))
  
        if (cmd->data_direction == DMA_TO_DEVICE)
                iscsit_stop_dataout_timer(cmd);
 -
 -      device->unreg_rdma_mem(isert_cmd, isert_conn);
 +      isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
  }
  
  static enum target_prot_op
@@@ -2017,6 -2274,234 +2017,6 @@@ isert_put_text_rsp(struct iscsi_cmd *cm
        return isert_post_response(isert_conn, isert_cmd);
  }
  
 -static int
 -isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 -                  struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
 -                  u32 data_left, u32 offset)
 -{
 -      struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 -      struct scatterlist *sg_start, *tmp_sg;
 -      struct isert_device *device = isert_conn->device;
 -      struct ib_device *ib_dev = device->ib_device;
 -      u32 sg_off, page_off;
 -      int i = 0, sg_nents;
 -
 -      sg_off = offset / PAGE_SIZE;
 -      sg_start = &cmd->se_cmd.t_data_sg[sg_off];
 -      sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
 -      page_off = offset % PAGE_SIZE;
 -
 -      rdma_wr->wr.sg_list = ib_sge;
 -      rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
 -
 -      /*
 -       * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
 -       */
 -      for_each_sg(sg_start, tmp_sg, sg_nents, i) {
 -              isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
 -                        "page_off: %u\n",
 -                        (unsigned long long)tmp_sg->dma_address,
 -                        tmp_sg->length, page_off);
 -
 -              ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
 -              ib_sge->length = min_t(u32, data_left,
 -                              ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
 -              ib_sge->lkey = device->pd->local_dma_lkey;
 -
 -              isert_dbg("RDMA ib_sge: addr: 0x%llx  length: %u lkey: %x\n",
 -                        ib_sge->addr, ib_sge->length, ib_sge->lkey);
 -              page_off = 0;
 -              data_left -= ib_sge->length;
 -              if (!data_left)
 -                      break;
 -              ib_sge++;
 -              isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
 -      }
 -
 -      rdma_wr->wr.num_sge = ++i;
 -      isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
 -                rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);
 -
 -      return rdma_wr->wr.num_sge;
 -}
 -
 -static int
 -isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
 -{
 -      struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 -      struct se_cmd *se_cmd = &cmd->se_cmd;
 -      struct isert_conn *isert_conn = conn->context;
 -      struct isert_data_buf *data = &isert_cmd->data;
 -      struct ib_rdma_wr *rdma_wr;
 -      struct ib_sge *ib_sge;
 -      u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
 -      int ret = 0, i, ib_sge_cnt;
 -
 -      offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
 -                      cmd->write_data_done : 0;
 -      ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
 -                               se_cmd->t_data_nents, se_cmd->data_length,
 -                               offset, isert_cmd->iser_ib_op,
 -                               &isert_cmd->data);
 -      if (ret)
 -              return ret;
 -
 -      data_left = data->len;
 -      offset = data->offset;
 -
 -      ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
 -      if (!ib_sge) {
 -              isert_warn("Unable to allocate ib_sge\n");
 -              ret = -ENOMEM;
 -              goto unmap_cmd;
 -      }
 -      isert_cmd->ib_sge = ib_sge;
 -
 -      isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
 -      isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
 -                      isert_cmd->rdma_wr_num, GFP_KERNEL);
 -      if (!isert_cmd->rdma_wr) {
 -              isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
 -              ret = -ENOMEM;
 -              goto unmap_cmd;
 -      }
 -
 -      rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
 -
 -      for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
 -              rdma_wr = &isert_cmd->rdma_wr[i];
 -              data_len = min(data_left, rdma_write_max);
 -
 -              rdma_wr->wr.send_flags = 0;
 -              if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
 -                      isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
 -
 -                      rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
 -                      rdma_wr->remote_addr = isert_cmd->read_va + offset;
 -                      rdma_wr->rkey = isert_cmd->read_stag;
 -                      if (i + 1 == isert_cmd->rdma_wr_num)
 -                              rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
 -                      else
 -                              rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
 -              } else {
 -                      isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
 -
 -                      rdma_wr->wr.opcode = IB_WR_RDMA_READ;
 -                      rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
 -                      rdma_wr->rkey = isert_cmd->write_stag;
 -                      if (i + 1 == isert_cmd->rdma_wr_num)
 -                              rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
 -                      else
 -                              rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
 -              }
 -
 -              ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
 -                                      rdma_wr, data_len, offset);
 -              ib_sge += ib_sge_cnt;
 -
 -              offset += data_len;
 -              va_offset += data_len;
 -              data_left -= data_len;
 -      }
 -
 -      return 0;
 -unmap_cmd:
 -      isert_unmap_data_buf(isert_conn, data);
 -
 -      return ret;
 -}
 -
 -static inline void
 -isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
 -{
 -      u32 rkey;
 -
 -      memset(inv_wr, 0, sizeof(*inv_wr));
 -      inv_wr->wr_cqe = NULL;
 -      inv_wr->opcode = IB_WR_LOCAL_INV;
 -      inv_wr->ex.invalidate_rkey = mr->rkey;
 -
 -      /* Bump the key */
 -      rkey = ib_inc_rkey(mr->rkey);
 -      ib_update_fast_reg_key(mr, rkey);
 -}
 -
 -static int
 -isert_fast_reg_mr(struct isert_conn *isert_conn,
 -                struct fast_reg_descriptor *fr_desc,
 -                struct isert_data_buf *mem,
 -                enum isert_indicator ind,
 -                struct ib_sge *sge)
 -{
 -      struct isert_device *device = isert_conn->device;
 -      struct ib_device *ib_dev = device->ib_device;
 -      struct ib_mr *mr;
 -      struct ib_reg_wr reg_wr;
 -      struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
 -      int ret, n;
 -
 -      if (mem->dma_nents == 1) {
 -              sge->lkey = device->pd->local_dma_lkey;
 -              sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
 -              sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
 -              isert_dbg("sge: addr: 0x%llx  length: %u lkey: %x\n",
 -                       sge->addr, sge->length, sge->lkey);
 -              return 0;
 -      }
 -
 -      if (ind == ISERT_DATA_KEY_VALID)
 -              /* Registering data buffer */
 -              mr = fr_desc->data_mr;
 -      else
 -              /* Registering protection buffer */
 -              mr = fr_desc->pi_ctx->prot_mr;
 -
 -      if (!(fr_desc->ind & ind)) {
 -              isert_inv_rkey(&inv_wr, mr);
 -              wr = &inv_wr;
 -      }
 -
 -      n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
 -      if (unlikely(n != mem->nents)) {
 -              isert_err("failed to map mr sg (%d/%d)\n",
 -                       n, mem->nents);
 -              return n < 0 ? n : -EINVAL;
 -      }
 -
 -      isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
 -                fr_desc, mem->nents, mem->offset);
 -
 -      reg_wr.wr.next = NULL;
 -      reg_wr.wr.opcode = IB_WR_REG_MR;
 -      reg_wr.wr.wr_cqe = NULL;
 -      reg_wr.wr.send_flags = 0;
 -      reg_wr.wr.num_sge = 0;
 -      reg_wr.mr = mr;
 -      reg_wr.key = mr->lkey;
 -      reg_wr.access = IB_ACCESS_LOCAL_WRITE;
 -
 -      if (!wr)
 -              wr = &reg_wr.wr;
 -      else
 -              wr->next = &reg_wr.wr;
 -
 -      ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
 -      if (ret) {
 -              isert_err("fast registration failed, ret:%d\n", ret);
 -              return ret;
 -      }
 -      fr_desc->ind &= ~ind;
 -
 -      sge->lkey = mr->lkey;
 -      sge->addr = mr->iova;
 -      sge->length = mr->length;
 -
 -      isert_dbg("sge: addr: 0x%llx  length: %u lkey: %x\n",
 -                sge->addr, sge->length, sge->lkey);
 -
 -      return ret;
 -}
 -
  static inline void
  isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
                     struct ib_sig_domain *domain)
  static int
  isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
  {
 +      memset(sig_attrs, 0, sizeof(*sig_attrs));
 +
        switch (se_cmd->prot_op) {
        case TARGET_PROT_DIN_INSERT:
        case TARGET_PROT_DOUT_STRIP:
                return -EINVAL;
        }
  
 +      sig_attrs->check_mask =
 +             (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
 +             (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
 +             (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
        return 0;
  }
  
 -static inline u8
 -isert_set_prot_checks(u8 prot_checks)
 -{
 -      return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
 -             (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
 -             (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
 -}
 -
  static int
 -isert_reg_sig_mr(struct isert_conn *isert_conn,
 -               struct isert_cmd *isert_cmd,
 -               struct fast_reg_descriptor *fr_desc)
 -{
 -      struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
 -      struct ib_sig_handover_wr sig_wr;
 -      struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
 -      struct pi_context *pi_ctx = fr_desc->pi_ctx;
 -      struct ib_sig_attrs sig_attrs;
 +isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
 +              struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
 +{
 +      struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
 +      enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
 +      u8 port_num = conn->cm_id->port_num;
 +      u64 addr;
 +      u32 rkey, offset;
        int ret;
  
 -      memset(&sig_attrs, 0, sizeof(sig_attrs));
 -      ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
 -      if (ret)
 -              goto err;
 -
 -      sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
 -
 -      if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
 -              isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
 -              wr = &inv_wr;
 -      }
 -
 -      memset(&sig_wr, 0, sizeof(sig_wr));
 -      sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
 -      sig_wr.wr.wr_cqe = NULL;
 -      sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
 -      sig_wr.wr.num_sge = 1;
 -      sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
 -      sig_wr.sig_attrs = &sig_attrs;
 -      sig_wr.sig_mr = pi_ctx->sig_mr;
 -      if (se_cmd->t_prot_sg)
 -              sig_wr.prot = &isert_cmd->ib_sg[PROT];
 -
 -      if (!wr)
 -              wr = &sig_wr.wr;
 -      else
 -              wr->next = &sig_wr.wr;
 -
 -      ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
 -      if (ret) {
 -              isert_err("fast registration failed, ret:%d\n", ret);
 -              goto err;
 -      }
 -      fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
 -
 -      isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
 -      isert_cmd->ib_sg[SIG].addr = 0;
 -      isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
 -      if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
 -          se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
 -              /*
 -               * We have protection guards on the wire
 -               * so we need to set a larget transfer
 -               */
 -              isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
 -
 -      isert_dbg("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
 -                isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
 -                isert_cmd->ib_sg[SIG].lkey);
 -err:
 -      return ret;
 -}
 -
 -static int
 -isert_handle_prot_cmd(struct isert_conn *isert_conn,
 -                    struct isert_cmd *isert_cmd)
 -{
 -      struct isert_device *device = isert_conn->device;
 -      struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
 -      int ret;
 -
 -      if (!isert_cmd->fr_desc->pi_ctx) {
 -              ret = isert_create_pi_ctx(isert_cmd->fr_desc,
 -                                        device->ib_device,
 -                                        device->pd);
 -              if (ret) {
 -                      isert_err("conn %p failed to allocate pi_ctx\n",
 -                                isert_conn);
 -                      return ret;
 -              }
 -      }
 -
 -      if (se_cmd->t_prot_sg) {
 -              ret = isert_map_data_buf(isert_conn, isert_cmd,
 -                                       se_cmd->t_prot_sg,
 -                                       se_cmd->t_prot_nents,
 -                                       se_cmd->prot_length,
 -                                       0,
 -                                       isert_cmd->iser_ib_op,
 -                                       &isert_cmd->prot);
 -              if (ret) {
 -                      isert_err("conn %p failed to map protection buffer\n",
 -                                isert_conn);
 -                      return ret;
 -              }
 -
 -              memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
 -              ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
 -                                      &isert_cmd->prot,
 -                                      ISERT_PROT_KEY_VALID,
 -                                      &isert_cmd->ib_sg[PROT]);
 -              if (ret) {
 -                      isert_err("conn %p failed to fast reg mr\n",
 -                                isert_conn);
 -                      goto unmap_prot_cmd;
 -              }
 -      }
 -
 -      ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
 -      if (ret) {
 -              isert_err("conn %p failed to fast reg mr\n",
 -                        isert_conn);
 -              goto unmap_prot_cmd;
 -      }
 -      isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
 -
 -      return 0;
 -
 -unmap_prot_cmd:
 -      if (se_cmd->t_prot_sg)
 -              isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
 -
 -      return ret;
 -}
 -
 -static int
 -isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
 -{
 -      struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 -      struct se_cmd *se_cmd = &cmd->se_cmd;
 -      struct isert_conn *isert_conn = conn->context;
 -      struct fast_reg_descriptor *fr_desc = NULL;
 -      struct ib_rdma_wr *rdma_wr;
 -      struct ib_sge *ib_sg;
 -      u32 offset;
 -      int ret = 0;
 -      unsigned long flags;
 -
 -      offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
 -                      cmd->write_data_done : 0;
 -      ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
 -                               se_cmd->t_data_nents, se_cmd->data_length,
 -                               offset, isert_cmd->iser_ib_op,
 -                               &isert_cmd->data);
 -      if (ret)
 -              return ret;
 -
 -      if (isert_cmd->data.dma_nents != 1 ||
 -          isert_prot_cmd(isert_conn, se_cmd)) {
 -              spin_lock_irqsave(&isert_conn->pool_lock, flags);
 -              fr_desc = list_first_entry(&isert_conn->fr_pool,
 -                                         struct fast_reg_descriptor, list);
 -              list_del(&fr_desc->list);
 -              spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
 -              isert_cmd->fr_desc = fr_desc;
 -      }
 -
 -      ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
 -                              ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
 -      if (ret)
 -              goto unmap_cmd;
 -
 -      if (isert_prot_cmd(isert_conn, se_cmd)) {
 -              ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
 -              if (ret)
 -                      goto unmap_cmd;
 -
 -              ib_sg = &isert_cmd->ib_sg[SIG];
 +      if (dir == DMA_FROM_DEVICE) {
 +              addr = cmd->write_va;
 +              rkey = cmd->write_stag;
 +              offset = cmd->iscsi_cmd->write_data_done;
        } else {
 -              ib_sg = &isert_cmd->ib_sg[DATA];
 +              addr = cmd->read_va;
 +              rkey = cmd->read_stag;
 +              offset = 0;
        }
  
 -      memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
 -      isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
 -      isert_cmd->rdma_wr_num = 1;
 -      memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
 -      isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
 +      if (isert_prot_cmd(conn, se_cmd)) {
 +              struct ib_sig_attrs sig_attrs;
  
 -      rdma_wr = &isert_cmd->s_rdma_wr;
 -      rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
 -      rdma_wr->wr.num_sge = 1;
 -      rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
 -      if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
 -              isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
 +              ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
 +              if (ret)
 +                      return ret;
  
 -              rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
 -              rdma_wr->remote_addr = isert_cmd->read_va;
 -              rdma_wr->rkey = isert_cmd->read_stag;
 -              rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
 -                                    0 : IB_SEND_SIGNALED;
 +              WARN_ON_ONCE(offset);
 +              ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
 +                              se_cmd->t_data_sg, se_cmd->t_data_nents,
 +                              se_cmd->t_prot_sg, se_cmd->t_prot_nents,
 +                              &sig_attrs, addr, rkey, dir);
        } else {
 -              isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
 -
 -              rdma_wr->wr.opcode = IB_WR_RDMA_READ;
 -              rdma_wr->remote_addr = isert_cmd->write_va;
 -              rdma_wr->rkey = isert_cmd->write_stag;
 -              rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
 +              ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
 +                              se_cmd->t_data_sg, se_cmd->t_data_nents,
 +                              offset, addr, rkey, dir);
        }
 -
 -      return 0;
 -
 -unmap_cmd:
 -      if (fr_desc) {
 -              spin_lock_irqsave(&isert_conn->pool_lock, flags);
 -              list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
 -              spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
 +      if (ret < 0) {
 +              isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
 +              return ret;
        }
 -      isert_unmap_data_buf(isert_conn, &isert_cmd->data);
  
 +      ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
 +      if (ret < 0)
 +              isert_err("Cmd: %p failed to post RDMA res\n", cmd);
        return ret;
  }
  
@@@ -2126,17 -2778,21 +2126,17 @@@ isert_put_datain(struct iscsi_conn *con
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
 -      struct isert_device *device = isert_conn->device;
 -      struct ib_send_wr *wr_failed;
 +      struct ib_cqe *cqe = NULL;
 +      struct ib_send_wr *chain_wr = NULL;
        int rc;
  
        isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
                 isert_cmd, se_cmd->data_length);
  
 -      isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
 -      rc = device->reg_rdma_mem(isert_cmd, conn);
 -      if (rc) {
 -              isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 -              return rc;
 -      }
 -
 -      if (!isert_prot_cmd(isert_conn, se_cmd)) {
 +      if (isert_prot_cmd(isert_conn, se_cmd)) {
 +              isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
 +              cqe = &isert_cmd->tx_desc.tx_cqe;
 +      } else {
                /*
                 * Build isert_conn->tx_desc for iSCSI response PDU and attach
                 */
                isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
                isert_init_send_wr(isert_conn, isert_cmd,
                                   &isert_cmd->tx_desc.send_wr);
 -              isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
 -              isert_cmd->rdma_wr_num += 1;
  
                rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
                if (rc) {
                        isert_err("ib_post_recv failed with %d\n", rc);
                        return rc;
                }
 -      }
  
 -      rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
 -      if (rc)
 -              isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 -
 -      if (!isert_prot_cmd(isert_conn, se_cmd))
 -              isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
 -                       "READ\n", isert_cmd);
 -      else
 -              isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
 -                       isert_cmd);
 +              chain_wr = &isert_cmd->tx_desc.send_wr;
 +      }
  
 +      isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
 +      isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
        return 1;
  }
  
  static int
  isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
  {
 -      struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
 -      struct isert_conn *isert_conn = conn->context;
 -      struct isert_device *device = isert_conn->device;
 -      struct ib_send_wr *wr_failed;
 -      int rc;
  
        isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
 -               isert_cmd, se_cmd->data_length, cmd->write_data_done);
 -      isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
 -      rc = device->reg_rdma_mem(isert_cmd, conn);
 -      if (rc) {
 -              isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
 -              return rc;
 -      }
 +               isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
  
 -      rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
 -      if (rc)
 -              isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
 +      isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
 +      isert_rdma_rw_ctx_post(isert_cmd, conn->context,
 +                      &isert_cmd->tx_desc.tx_cqe, NULL);
  
        isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
                 isert_cmd);
 -
        return 0;
  }
  
@@@ -2596,9 -3273,19 +2596,19 @@@ static void isert_free_conn(struct iscs
        isert_put_conn(isert_conn);
  }
  
+ static void isert_get_rx_pdu(struct iscsi_conn *conn)
+ {
+       struct completion comp;
+       init_completion(&comp);
+       wait_for_completion_interruptible(&comp);
+ }
  static struct iscsit_transport iser_target_transport = {
        .name                   = "IB/iSER",
        .transport_type         = ISCSI_INFINIBAND,
+       .rdma_shutdown          = true,
        .priv_size              = sizeof(struct isert_cmd),
        .owner                  = THIS_MODULE,
        .iscsit_setup_np        = isert_setup_np,
        .iscsit_queue_data_in   = isert_put_datain,
        .iscsit_queue_status    = isert_put_response,
        .iscsit_aborted_task    = isert_aborted_task,
+       .iscsit_get_rx_pdu      = isert_get_rx_pdu,
        .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
  };
  
@@@ -254,8 -254,8 +254,8 @@@ static void srpt_get_class_port_info(st
        memset(cif, 0, sizeof(*cif));
        cif->base_version = 1;
        cif->class_version = 1;
 -      cif->resp_time_value = 20;
  
 +      ib_set_cpi_resp_time(cif, 20);
        mad->mad_hdr.status = 0;
  }
  
@@@ -764,6 -764,52 +764,6 @@@ static int srpt_post_recv(struct srpt_d
        return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
  }
  
 -/**
 - * srpt_post_send() - Post an IB send request.
 - *
 - * Returns zero upon success and a non-zero value upon failure.
 - */
 -static int srpt_post_send(struct srpt_rdma_ch *ch,
 -                        struct srpt_send_ioctx *ioctx, int len)
 -{
 -      struct ib_sge list;
 -      struct ib_send_wr wr, *bad_wr;
 -      struct srpt_device *sdev = ch->sport->sdev;
 -      int ret;
 -
 -      atomic_inc(&ch->req_lim);
 -
 -      ret = -ENOMEM;
 -      if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
 -              pr_warn("IB send queue full (needed 1)\n");
 -              goto out;
 -      }
 -
 -      ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
 -                                    DMA_TO_DEVICE);
 -
 -      list.addr = ioctx->ioctx.dma;
 -      list.length = len;
 -      list.lkey = sdev->pd->local_dma_lkey;
 -
 -      ioctx->ioctx.cqe.done = srpt_send_done;
 -      wr.next = NULL;
 -      wr.wr_cqe = &ioctx->ioctx.cqe;
 -      wr.sg_list = &list;
 -      wr.num_sge = 1;
 -      wr.opcode = IB_WR_SEND;
 -      wr.send_flags = IB_SEND_SIGNALED;
 -
 -      ret = ib_post_send(ch->qp, &wr, &bad_wr);
 -
 -out:
 -      if (ret < 0) {
 -              atomic_inc(&ch->sq_wr_avail);
 -              atomic_dec(&ch->req_lim);
 -      }
 -      return ret;
 -}
 -
  /**
   * srpt_zerolength_write() - Perform a zero-length RDMA write.
   *
@@@ -797,110 -843,6 +797,110 @@@ static void srpt_zerolength_write_done(
        }
  }
  
 +static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
 +              struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
 +              unsigned *sg_cnt)
 +{
 +      enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
 +      struct srpt_rdma_ch *ch = ioctx->ch;
 +      struct scatterlist *prev = NULL;
 +      unsigned prev_nents;
 +      int ret, i;
 +
 +      if (nbufs == 1) {
 +              ioctx->rw_ctxs = &ioctx->s_rw_ctx;
 +      } else {
 +              ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
 +                      GFP_KERNEL);
 +              if (!ioctx->rw_ctxs)
 +                      return -ENOMEM;
 +      }
 +
 +      for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
 +              struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
 +              u64 remote_addr = be64_to_cpu(db->va);
 +              u32 size = be32_to_cpu(db->len);
 +              u32 rkey = be32_to_cpu(db->key);
 +
 +              ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
 +                              i < nbufs - 1);
 +              if (ret)
 +                      goto unwind;
 +
 +              ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
 +                              ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
 +              if (ret < 0) {
 +                      target_free_sgl(ctx->sg, ctx->nents);
 +                      goto unwind;
 +              }
 +
 +              ioctx->n_rdma += ret;
 +              ioctx->n_rw_ctx++;
 +
 +              if (prev) {
 +                      sg_unmark_end(&prev[prev_nents - 1]);
 +                      sg_chain(prev, prev_nents + 1, ctx->sg);
 +              } else {
 +                      *sg = ctx->sg;
 +              }
 +
 +              prev = ctx->sg;
 +              prev_nents = ctx->nents;
 +
 +              *sg_cnt += ctx->nents;
 +      }
 +
 +      return 0;
 +
 +unwind:
 +      while (--i >= 0) {
 +              struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
 +
 +              rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
 +                              ctx->sg, ctx->nents, dir);
 +              target_free_sgl(ctx->sg, ctx->nents);
 +      }
 +      if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
 +              kfree(ioctx->rw_ctxs);
 +      return ret;
 +}
 +
 +static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
 +                                  struct srpt_send_ioctx *ioctx)
 +{
 +      enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
 +      int i;
 +
 +      for (i = 0; i < ioctx->n_rw_ctx; i++) {
 +              struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
 +
 +              rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
 +                              ctx->sg, ctx->nents, dir);
 +              target_free_sgl(ctx->sg, ctx->nents);
 +      }
 +
 +      if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
 +              kfree(ioctx->rw_ctxs);
 +}
 +
 +static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
 +{
 +      /*
 +       * The pointer computations below will only be compiled correctly
 +       * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
 +       * whether srp_cmd::add_data has been declared as a byte pointer.
 +       */
 +      BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
 +                   !__same_type(srp_cmd->add_data[0], (u8)0));
 +
 +      /*
 +       * According to the SRP spec, the lower two bits of the 'ADDITIONAL
 +       * CDB LENGTH' field are reserved and the size in bytes of this field
 +       * is four times the value specified in bits 3..7. Hence the "& ~3".
 +       */
 +      return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
 +}
 +
  /**
   * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
   * @ioctx: Pointer to the I/O context associated with the request.
   * -ENOMEM when memory allocation fails and zero upon success.
   */
  static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
 -                           struct srp_cmd *srp_cmd,
 -                           enum dma_data_direction *dir, u64 *data_len)
 +              struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
 +              struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
  {
 -      struct srp_indirect_buf *idb;
 -      struct srp_direct_buf *db;
 -      unsigned add_cdb_offset;
 -      int ret;
 -
 -      /*
 -       * The pointer computations below will only be compiled correctly
 -       * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
 -       * whether srp_cmd::add_data has been declared as a byte pointer.
 -       */
 -      BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
 -                   && !__same_type(srp_cmd->add_data[0], (u8)0));
 -
        BUG_ON(!dir);
        BUG_ON(!data_len);
  
 -      ret = 0;
 -      *data_len = 0;
 -
        /*
         * The lower four bits of the buffer format field contain the DATA-IN
         * buffer descriptor format, and the highest four bits contain the
         * DATA-OUT buffer descriptor format.
         */
 -      *dir = DMA_NONE;
        if (srp_cmd->buf_fmt & 0xf)
                /* DATA-IN: transfer data from target to initiator (read). */
                *dir = DMA_FROM_DEVICE;
        else if (srp_cmd->buf_fmt >> 4)
                /* DATA-OUT: transfer data from initiator to target (write). */
                *dir = DMA_TO_DEVICE;
 +      else
 +              *dir = DMA_NONE;
 +
 +      /* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
 +      ioctx->cmd.data_direction = *dir;
  
 -      /*
 -       * According to the SRP spec, the lower two bits of the 'ADDITIONAL
 -       * CDB LENGTH' field are reserved and the size in bytes of this field
 -       * is four times the value specified in bits 3..7. Hence the "& ~3".
 -       */
 -      add_cdb_offset = srp_cmd->add_cdb_len & ~3;
        if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
            ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
 -              ioctx->n_rbuf = 1;
 -              ioctx->rbufs = &ioctx->single_rbuf;
 +              struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
  
 -              db = (struct srp_direct_buf *)(srp_cmd->add_data
 -                                             + add_cdb_offset);
 -              memcpy(ioctx->rbufs, db, sizeof(*db));
                *data_len = be32_to_cpu(db->len);
 +              return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
        } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
                   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
 -              idb = (struct srp_indirect_buf *)(srp_cmd->add_data
 -                                                + add_cdb_offset);
 +              struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
 +              int nbufs = be32_to_cpu(idb->table_desc.len) /
 +                              sizeof(struct srp_direct_buf);
  
 -              ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
 -
 -              if (ioctx->n_rbuf >
 +              if (nbufs >
                    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
                        pr_err("received unsupported SRP_CMD request"
                               " type (%u out + %u in != %u / %zu)\n",
                               srp_cmd->data_out_desc_cnt,
                               srp_cmd->data_in_desc_cnt,
                               be32_to_cpu(idb->table_desc.len),
 -                             sizeof(*db));
 -                      ioctx->n_rbuf = 0;
 -                      ret = -EINVAL;
 -                      goto out;
 -              }
 -
 -              if (ioctx->n_rbuf == 1)
 -                      ioctx->rbufs = &ioctx->single_rbuf;
 -              else {
 -                      ioctx->rbufs =
 -                              kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
 -                      if (!ioctx->rbufs) {
 -                              ioctx->n_rbuf = 0;
 -                              ret = -ENOMEM;
 -                              goto out;
 -                      }
 +                             sizeof(struct srp_direct_buf));
 +                      return -EINVAL;
                }
  
 -              db = idb->desc_list;
 -              memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
                *data_len = be32_to_cpu(idb->len);
 +              return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
 +                              sg, sg_cnt);
 +      } else {
 +              *data_len = 0;
 +              return 0;
        }
 -out:
 -      return ret;
  }
  
  /**
@@@ -1071,6 -1048,217 +1071,6 @@@ static int srpt_ch_qp_err(struct srpt_r
        return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
  }
  
 -/**
 - * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
 - */
 -static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 -                                  struct srpt_send_ioctx *ioctx)
 -{
 -      struct scatterlist *sg;
 -      enum dma_data_direction dir;
 -
 -      BUG_ON(!ch);
 -      BUG_ON(!ioctx);
 -      BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);
 -
 -      while (ioctx->n_rdma)
 -              kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);
 -
 -      kfree(ioctx->rdma_wrs);
 -      ioctx->rdma_wrs = NULL;
 -
 -      if (ioctx->mapped_sg_count) {
 -              sg = ioctx->sg;
 -              WARN_ON(!sg);
 -              dir = ioctx->cmd.data_direction;
 -              BUG_ON(dir == DMA_NONE);
 -              ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
 -                              target_reverse_dma_direction(&ioctx->cmd));
 -              ioctx->mapped_sg_count = 0;
 -      }
 -}
 -
 -/**
 - * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
 - */
 -static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 -                               struct srpt_send_ioctx *ioctx)
 -{
 -      struct ib_device *dev = ch->sport->sdev->device;
 -      struct se_cmd *cmd;
 -      struct scatterlist *sg, *sg_orig;
 -      int sg_cnt;
 -      enum dma_data_direction dir;
 -      struct ib_rdma_wr *riu;
 -      struct srp_direct_buf *db;
 -      dma_addr_t dma_addr;
 -      struct ib_sge *sge;
 -      u64 raddr;
 -      u32 rsize;
 -      u32 tsize;
 -      u32 dma_len;
 -      int count, nrdma;
 -      int i, j, k;
 -
 -      BUG_ON(!ch);
 -      BUG_ON(!ioctx);
 -      cmd = &ioctx->cmd;
 -      dir = cmd->data_direction;
 -      BUG_ON(dir == DMA_NONE);
 -
 -      ioctx->sg = sg = sg_orig = cmd->t_data_sg;
 -      ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
 -
 -      count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
 -                            target_reverse_dma_direction(cmd));
 -      if (unlikely(!count))
 -              return -EAGAIN;
 -
 -      ioctx->mapped_sg_count = count;
 -
 -      if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
 -              nrdma = ioctx->n_rdma_wrs;
 -      else {
 -              nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
 -                      + ioctx->n_rbuf;
 -
 -              ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
 -                              GFP_KERNEL);
 -              if (!ioctx->rdma_wrs)
 -                      goto free_mem;
 -
 -              ioctx->n_rdma_wrs = nrdma;
 -      }
 -
 -      db = ioctx->rbufs;
 -      tsize = cmd->data_length;
 -      dma_len = ib_sg_dma_len(dev, &sg[0]);
 -      riu = ioctx->rdma_wrs;
 -
 -      /*
 -       * For each remote desc - calculate the #ib_sge.
 -       * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then
 -       *      each remote desc rdma_iu is required a rdma wr;
 -       * else
 -       *      we need to allocate extra rdma_iu to carry extra #ib_sge in
 -       *      another rdma wr
 -       */
 -      for (i = 0, j = 0;
 -           j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
 -              rsize = be32_to_cpu(db->len);
 -              raddr = be64_to_cpu(db->va);
 -              riu->remote_addr = raddr;
 -              riu->rkey = be32_to_cpu(db->key);
 -              riu->wr.num_sge = 0;
 -
 -              /* calculate how many sge required for this remote_buf */
 -              while (rsize > 0 && tsize > 0) {
 -
 -                      if (rsize >= dma_len) {
 -                              tsize -= dma_len;
 -                              rsize -= dma_len;
 -                              raddr += dma_len;
 -
 -                              if (tsize > 0) {
 -                                      ++j;
 -                                      if (j < count) {
 -                                              sg = sg_next(sg);
 -                                              dma_len = ib_sg_dma_len(
 -                                                              dev, sg);
 -                                      }
 -                              }
 -                      } else {
 -                              tsize -= rsize;
 -                              dma_len -= rsize;
 -                              rsize = 0;
 -                      }
 -
 -                      ++riu->wr.num_sge;
 -
 -                      if (rsize > 0 &&
 -                          riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
 -                              ++ioctx->n_rdma;
 -                              riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
 -                                              sizeof(*riu->wr.sg_list),
 -                                              GFP_KERNEL);
 -                              if (!riu->wr.sg_list)
 -                                      goto free_mem;
 -
 -                              ++riu;
 -                              riu->wr.num_sge = 0;
 -                              riu->remote_addr = raddr;
 -                              riu->rkey = be32_to_cpu(db->key);
 -                      }
 -              }
 -
 -              ++ioctx->n_rdma;
 -              riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
 -                                      sizeof(*riu->wr.sg_list),
 -                                      GFP_KERNEL);
 -              if (!riu->wr.sg_list)
 -                      goto free_mem;
 -      }
 -
 -      db = ioctx->rbufs;
 -      tsize = cmd->data_length;
 -      riu = ioctx->rdma_wrs;
 -      sg = sg_orig;
 -      dma_len = ib_sg_dma_len(dev, &sg[0]);
 -      dma_addr = ib_sg_dma_address(dev, &sg[0]);
 -
 -      /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
 -      for (i = 0, j = 0;
 -           j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
 -              rsize = be32_to_cpu(db->len);
 -              sge = riu->wr.sg_list;
 -              k = 0;
 -
 -              while (rsize > 0 && tsize > 0) {
 -                      sge->addr = dma_addr;
 -                      sge->lkey = ch->sport->sdev->pd->local_dma_lkey;
 -
 -                      if (rsize >= dma_len) {
 -                              sge->length =
 -                                      (tsize < dma_len) ? tsize : dma_len;
 -                              tsize -= dma_len;
 -                              rsize -= dma_len;
 -
 -                              if (tsize > 0) {
 -                                      ++j;
 -                                      if (j < count) {
 -                                              sg = sg_next(sg);
 -                                              dma_len = ib_sg_dma_len(
 -                                                              dev, sg);
 -                                              dma_addr = ib_sg_dma_address(
 -                                                              dev, sg);
 -                                      }
 -                              }
 -                      } else {
 -                              sge->length = (tsize < rsize) ? tsize : rsize;
 -                              tsize -= rsize;
 -                              dma_len -= rsize;
 -                              dma_addr += rsize;
 -                              rsize = 0;
 -                      }
 -
 -                      ++k;
 -                      if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
 -                              ++riu;
 -                              sge = riu->wr.sg_list;
 -                              k = 0;
 -                      } else if (rsize > 0 && tsize > 0)
 -                              ++sge;
 -              }
 -      }
 -
 -      return 0;
 -
 -free_mem:
 -      srpt_unmap_sg_to_ib_sge(ch, ioctx);
 -
 -      return -ENOMEM;
 -}
 -
  /**
   * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
   */
@@@ -1096,8 -1284,12 +1096,8 @@@ static struct srpt_send_ioctx *srpt_get
        BUG_ON(ioctx->ch != ch);
        spin_lock_init(&ioctx->spinlock);
        ioctx->state = SRPT_STATE_NEW;
 -      ioctx->n_rbuf = 0;
 -      ioctx->rbufs = NULL;
        ioctx->n_rdma = 0;
 -      ioctx->n_rdma_wrs = 0;
 -      ioctx->rdma_wrs = NULL;
 -      ioctx->mapped_sg_count = 0;
 +      ioctx->n_rw_ctx = 0;
        init_completion(&ioctx->tx_done);
        ioctx->queue_status_only = false;
        /*
@@@ -1167,6 -1359,7 +1167,6 @@@ static int srpt_abort_cmd(struct srpt_s
                 * SRP_RSP sending failed or the SRP_RSP send completion has
                 * not been received in time.
                 */
 -              srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
                transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
@@@ -1194,7 -1387,6 +1194,7 @@@ static void srpt_rdma_read_done(struct 
  
        WARN_ON(ioctx->n_rdma <= 0);
        atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
 +      ioctx->n_rdma = 0;
  
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
                       __LINE__, srpt_get_cmd_state(ioctx));
  }
  
 -static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 -{
 -      struct srpt_send_ioctx *ioctx =
 -              container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
 -
 -      if (unlikely(wc->status != IB_WC_SUCCESS)) {
 -              /*
 -               * Note: if an RDMA write error completion is received that
 -               * means that a SEND also has been posted. Defer further
 -               * processing of the associated command until the send error
 -               * completion has been received.
 -               */
 -              pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
 -                      ioctx, wc->status);
 -      }
 -}
 -
  /**
   * srpt_build_cmd_rsp() - Build an SRP_RSP response.
   * @ch: RDMA channel through which the request has been received.
@@@ -1328,8 -1537,6 +1328,8 @@@ static void srpt_handle_cmd(struct srpt
  {
        struct se_cmd *cmd;
        struct srp_cmd *srp_cmd;
 +      struct scatterlist *sg = NULL;
 +      unsigned sg_cnt = 0;
        u64 data_len;
        enum dma_data_direction dir;
        int rc;
                break;
        }
  
 -      if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
 -              pr_err("0x%llx: parsing SRP descriptor table failed.\n",
 -                     srp_cmd->tag);
 +      rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
 +                      &data_len);
 +      if (rc) {
 +              if (rc != -EAGAIN) {
 +                      pr_err("0x%llx: parsing SRP descriptor table failed.\n",
 +                             srp_cmd->tag);
 +              }
                goto release_ioctx;
        }
  
 -      rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
 +      rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
                               &send_ioctx->sense_data[0],
                               scsilun_to_int(&srp_cmd->lun), data_len,
 -                             TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
 +                             TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
 +                             sg, sg_cnt, NULL, 0, NULL, 0);
        if (rc != 0) {
                pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
                         srp_cmd->tag);
@@@ -1462,21 -1664,23 +1462,21 @@@ static void srpt_handle_new_iu(struct s
                                   recv_ioctx->ioctx.dma, srp_max_req_size,
                                   DMA_FROM_DEVICE);
  
 -      if (unlikely(ch->state == CH_CONNECTING)) {
 -              list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
 -              goto out;
 -      }
 +      if (unlikely(ch->state == CH_CONNECTING))
 +              goto out_wait;
  
        if (unlikely(ch->state != CH_LIVE))
 -              goto out;
 +              return;
  
        srp_cmd = recv_ioctx->ioctx.buf;
        if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
 -              if (!send_ioctx)
 +              if (!send_ioctx) {
 +                      if (!list_empty(&ch->cmd_wait_list))
 +                              goto out_wait;
                        send_ioctx = srpt_get_send_ioctx(ch);
 -              if (unlikely(!send_ioctx)) {
 -                      list_add_tail(&recv_ioctx->wait_list,
 -                                    &ch->cmd_wait_list);
 -                      goto out;
                }
 +              if (unlikely(!send_ioctx))
 +                      goto out_wait;
        }
  
        switch (srp_cmd->opcode) {
        }
  
        srpt_post_recv(ch->sport->sdev, recv_ioctx);
 -out:
        return;
 +
 +out_wait:
 +      list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
  }
  
  static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@@ -1577,13 -1779,14 +1577,13 @@@ static void srpt_send_done(struct ib_c
        WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
                state != SRPT_STATE_MGMT_RSP_SENT);
  
 -      atomic_inc(&ch->sq_wr_avail);
 +      atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
  
        if (wc->status != IB_WC_SUCCESS)
                pr_info("sending response for ioctx 0x%p failed"
                        " with status %d\n", ioctx, wc->status);
  
        if (state != SRPT_STATE_DONE) {
 -              srpt_unmap_sg_to_ib_sge(ch, ioctx);
                transport_generic_free_cmd(&ioctx->cmd, 0);
        } else {
                pr_err("IB completion has been received too late for"
@@@ -1629,18 -1832,8 +1629,18 @@@ retry
        qp_init->srq = sdev->srq;
        qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_init->qp_type = IB_QPT_RC;
 -      qp_init->cap.max_send_wr = srp_sq_size;
 -      qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
 +      /*
 +       * We divide up our send queue size into half SEND WRs to send the
 +       * completions, and half R/W contexts to actually do the RDMA
 +       * READ/WRITE transfers.  Note that we need to allocate CQ slots for
 +       * both, as RDMA contexts will also post completions for the
 +       * RDMA READ case.
 +       */
 +      qp_init->cap.max_send_wr = srp_sq_size / 2;
 +      qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
 +      qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
 +                                      sdev->device->attrs.max_sge);
 +      qp_init->port_num = ch->sport->port;
  
        ch->qp = ib_create_qp(sdev->pd, qp_init);
        if (IS_ERR(ch->qp)) {
@@@ -1767,14 -1960,6 +1767,6 @@@ static void __srpt_close_all_ch(struct 
        }
  }
  
- /**
-  * srpt_shutdown_session() - Whether or not a session may be shut down.
-  */
- static int srpt_shutdown_session(struct se_session *se_sess)
- {
-       return 1;
- }
  static void srpt_free_ch(struct kref *kref)
  {
        struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
@@@ -2193,6 -2378,95 +2185,6 @@@ static int srpt_cm_handler(struct ib_cm
        return ret;
  }
  
 -/**
 - * srpt_perform_rdmas() - Perform IB RDMA.
 - *
 - * Returns zero upon success or a negative number upon failure.
 - */
 -static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
 -                            struct srpt_send_ioctx *ioctx)
 -{
 -      struct ib_send_wr *bad_wr;
 -      int sq_wr_avail, ret, i;
 -      enum dma_data_direction dir;
 -      const int n_rdma = ioctx->n_rdma;
 -
 -      dir = ioctx->cmd.data_direction;
 -      if (dir == DMA_TO_DEVICE) {
 -              /* write */
 -              ret = -ENOMEM;
 -              sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
 -              if (sq_wr_avail < 0) {
 -                      pr_warn("IB send queue full (needed %d)\n",
 -                              n_rdma);
 -                      goto out;
 -              }
 -      }
 -
 -      for (i = 0; i < n_rdma; i++) {
 -              struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;
 -
 -              wr->opcode = (dir == DMA_FROM_DEVICE) ?
 -                              IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 -
 -              if (i == n_rdma - 1) {
 -                      /* only get completion event for the last rdma read */
 -                      if (dir == DMA_TO_DEVICE) {
 -                              wr->send_flags = IB_SEND_SIGNALED;
 -                              ioctx->rdma_cqe.done = srpt_rdma_read_done;
 -                      } else {
 -                              ioctx->rdma_cqe.done = srpt_rdma_write_done;
 -                      }
 -                      wr->wr_cqe = &ioctx->rdma_cqe;
 -                      wr->next = NULL;
 -              } else {
 -                      wr->wr_cqe = NULL;
 -                      wr->next = &ioctx->rdma_wrs[i + 1].wr;
 -              }
 -      }
 -
 -      ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
 -      if (ret)
 -              pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
 -                               __func__, __LINE__, ret, i, n_rdma);
 -out:
 -      if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
 -              atomic_add(n_rdma, &ch->sq_wr_avail);
 -      return ret;
 -}
 -
 -/**
 - * srpt_xfer_data() - Start data transfer from initiator to target.
 - */
 -static int srpt_xfer_data(struct srpt_rdma_ch *ch,
 -                        struct srpt_send_ioctx *ioctx)
 -{
 -      int ret;
 -
 -      ret = srpt_map_sg_to_ib_sge(ch, ioctx);
 -      if (ret) {
 -              pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
 -              goto out;
 -      }
 -
 -      ret = srpt_perform_rdmas(ch, ioctx);
 -      if (ret) {
 -              if (ret == -EAGAIN || ret == -ENOMEM)
 -                      pr_info("%s[%d] queue full -- ret=%d\n",
 -                              __func__, __LINE__, ret);
 -              else
 -                      pr_err("%s[%d] fatal error -- ret=%d\n",
 -                             __func__, __LINE__, ret);
 -              goto out_unmap;
 -      }
 -
 -out:
 -      return ret;
 -out_unmap:
 -      srpt_unmap_sg_to_ib_sge(ch, ioctx);
 -      goto out;
 -}
 -
  static int srpt_write_pending_status(struct se_cmd *se_cmd)
  {
        struct srpt_send_ioctx *ioctx;
@@@ -2209,42 -2483,11 +2201,42 @@@ static int srpt_write_pending(struct se
        struct srpt_send_ioctx *ioctx =
                container_of(se_cmd, struct srpt_send_ioctx, cmd);
        struct srpt_rdma_ch *ch = ioctx->ch;
 +      struct ib_send_wr *first_wr = NULL, *bad_wr;
 +      struct ib_cqe *cqe = &ioctx->rdma_cqe;
        enum srpt_command_state new_state;
 +      int ret, i;
  
        new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
        WARN_ON(new_state == SRPT_STATE_DONE);
 -      return srpt_xfer_data(ch, ioctx);
 +
 +      if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
 +              pr_warn("%s: IB send queue full (needed %d)\n",
 +                              __func__, ioctx->n_rdma);
 +              ret = -ENOMEM;
 +              goto out_undo;
 +      }
 +
 +      cqe->done = srpt_rdma_read_done;
 +      for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
 +              struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
 +
 +              first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
 +                              cqe, first_wr);
 +              cqe = NULL;
 +      }
 +
 +      ret = ib_post_send(ch->qp, first_wr, &bad_wr);
 +      if (ret) {
 +              pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
 +                       __func__, ret, ioctx->n_rdma,
 +                       atomic_read(&ch->sq_wr_avail));
 +              goto out_undo;
 +      }
 +
 +      return 0;
 +out_undo:
 +      atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
 +      return ret;
  }
  
  static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
   */
  static void srpt_queue_response(struct se_cmd *cmd)
  {
 -      struct srpt_rdma_ch *ch;
 -      struct srpt_send_ioctx *ioctx;
 +      struct srpt_send_ioctx *ioctx =
 +              container_of(cmd, struct srpt_send_ioctx, cmd);
 +      struct srpt_rdma_ch *ch = ioctx->ch;
 +      struct srpt_device *sdev = ch->sport->sdev;
 +      struct ib_send_wr send_wr, *first_wr = NULL, *bad_wr;
 +      struct ib_sge sge;
        enum srpt_command_state state;
        unsigned long flags;
 -      int ret;
 -      enum dma_data_direction dir;
 -      int resp_len;
 +      int resp_len, ret, i;
        u8 srp_tm_status;
  
 -      ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
 -      ch = ioctx->ch;
        BUG_ON(!ch);
  
        spin_lock_irqsave(&ioctx->spinlock, flags);
                return;
        }
  
 -      dir = ioctx->cmd.data_direction;
 -
        /* For read commands, transfer the data to the initiator. */
 -      if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
 +      if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
 +          ioctx->cmd.data_length &&
            !ioctx->queue_status_only) {
 -              ret = srpt_xfer_data(ch, ioctx);
 -              if (ret) {
 -                      pr_err("xfer_data failed for tag %llu\n",
 -                             ioctx->cmd.tag);
 -                      return;
 +              for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
 +                      struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
 +
 +                      first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
 +                                      ch->sport->port, NULL,
 +                                      first_wr ? first_wr : &send_wr);
                }
 +      } else {
 +              first_wr = &send_wr;
        }
  
        if (state != SRPT_STATE_MGMT)
                resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
                                                 ioctx->cmd.tag);
        }
 -      ret = srpt_post_send(ch, ioctx, resp_len);
 -      if (ret) {
 -              pr_err("sending cmd response failed for tag %llu\n",
 -                     ioctx->cmd.tag);
 -              srpt_unmap_sg_to_ib_sge(ch, ioctx);
 -              srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
 -              target_put_sess_cmd(&ioctx->cmd);
 +
 +      atomic_inc(&ch->req_lim);
 +
 +      if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
 +                      &ch->sq_wr_avail) < 0)) {
 +              pr_warn("%s: IB send queue full (needed %d)\n",
 +                              __func__, ioctx->n_rdma);
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +
 +      ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
 +                                    DMA_TO_DEVICE);
 +
 +      sge.addr = ioctx->ioctx.dma;
 +      sge.length = resp_len;
 +      sge.lkey = sdev->pd->local_dma_lkey;
 +
 +      ioctx->ioctx.cqe.done = srpt_send_done;
 +      send_wr.next = NULL;
 +      send_wr.wr_cqe = &ioctx->ioctx.cqe;
 +      send_wr.sg_list = &sge;
 +      send_wr.num_sge = 1;
 +      send_wr.opcode = IB_WR_SEND;
 +      send_wr.send_flags = IB_SEND_SIGNALED;
 +
 +      ret = ib_post_send(ch->qp, first_wr, &bad_wr);
 +      if (ret < 0) {
 +              pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
 +                      __func__, ioctx->cmd.tag, ret);
 +              goto out;
        }
 +
 +      return;
 +
 +out:
 +      atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
 +      atomic_dec(&ch->req_lim);
 +      srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
 +      target_put_sess_cmd(&ioctx->cmd);
  }
  
  static int srpt_queue_data_in(struct se_cmd *cmd)
@@@ -2382,6 -2591,10 +2374,6 @@@ static void srpt_queue_tm_rsp(struct se
  
  static void srpt_aborted_task(struct se_cmd *cmd)
  {
 -      struct srpt_send_ioctx *ioctx = container_of(cmd,
 -                              struct srpt_send_ioctx, cmd);
 -
 -      srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
  }
  
  static int srpt_queue_status(struct se_cmd *cmd)
@@@ -2682,10 -2895,12 +2674,10 @@@ static void srpt_release_cmd(struct se_
        unsigned long flags;
  
        WARN_ON(ioctx->state != SRPT_STATE_DONE);
 -      WARN_ON(ioctx->mapped_sg_count != 0);
  
 -      if (ioctx->n_rbuf > 1) {
 -              kfree(ioctx->rbufs);
 -              ioctx->rbufs = NULL;
 -              ioctx->n_rbuf = 0;
 +      if (ioctx->n_rw_ctx) {
 +              srpt_free_rw_ctxs(ch, ioctx);
 +              ioctx->n_rw_ctx = 0;
        }
  
        spin_lock_irqsave(&ch->spinlock, flags);
@@@ -3064,7 -3279,6 +3056,6 @@@ static const struct target_core_fabric_
        .tpg_get_inst_index             = srpt_tpg_get_inst_index,
        .release_cmd                    = srpt_release_cmd,
        .check_stop_free                = srpt_check_stop_free,
-       .shutdown_session               = srpt_shutdown_session,
        .close_session                  = srpt_close_session,
        .sess_get_index                 = srpt_sess_get_index,
        .sess_get_initiator_sid         = NULL,
@@@ -514,6 -514,7 +514,7 @@@ void iscsit_add_cmd_to_immediate_queue
  
        wake_up(&conn->queues_wq);
  }
+ EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
  
  struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
  {
@@@ -725,6 -726,9 +726,9 @@@ void __iscsit_free_cmd(struct iscsi_cm
                iscsit_remove_cmd_from_immediate_queue(cmd, conn);
                iscsit_remove_cmd_from_response_queue(cmd, conn);
        }
+       if (conn && conn->conn_transport->iscsit_release_cmd)
+               conn->conn_transport->iscsit_release_cmd(conn, cmd);
  }
  
  void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
                break;
        }
  }
+ EXPORT_SYMBOL(iscsit_free_cmd);
  
  int iscsit_check_session_usage_count(struct iscsi_session *sess)
  {
@@@ -1283,8 -1288,9 +1288,8 @@@ static int iscsit_do_rx_data
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
                      count->iov, count->iov_count, data);
  
 -      while (total_rx < data) {
 -              rx_loop = sock_recvmsg(conn->sock, &msg,
 -                                    (data - total_rx), MSG_WAITALL);
 +      while (msg_data_left(&msg)) {
 +              rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
                if (rx_loop <= 0) {
                        pr_debug("rx_loop: %d total_rx: %d\n",
                                rx_loop, total_rx);
@@@ -239,7 -239,6 +239,6 @@@ struct se_session *transport_init_sessi
        INIT_LIST_HEAD(&se_sess->sess_cmd_list);
        INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
-       kref_init(&se_sess->sess_kref);
        se_sess->sup_prot_ops = sup_prot_ops;
  
        return se_sess;
@@@ -430,27 -429,6 +429,6 @@@ target_alloc_session(struct se_portal_g
  }
  EXPORT_SYMBOL(target_alloc_session);
  
- static void target_release_session(struct kref *kref)
- {
-       struct se_session *se_sess = container_of(kref,
-                       struct se_session, sess_kref);
-       struct se_portal_group *se_tpg = se_sess->se_tpg;
-       se_tpg->se_tpg_tfo->close_session(se_sess);
- }
- int target_get_session(struct se_session *se_sess)
- {
-       return kref_get_unless_zero(&se_sess->sess_kref);
- }
- EXPORT_SYMBOL(target_get_session);
- void target_put_session(struct se_session *se_sess)
- {
-       kref_put(&se_sess->sess_kref, target_release_session);
- }
- EXPORT_SYMBOL(target_put_session);
  ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
  {
        struct se_session *se_sess;
@@@ -499,8 -477,8 +477,8 @@@ void transport_deregister_session_confi
        se_nacl = se_sess->se_node_acl;
        if (se_nacl) {
                spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
-               if (se_nacl->acl_stop == 0)
-                       list_del(&se_sess->sess_acl_list);
+               if (!list_empty(&se_sess->sess_acl_list))
+                       list_del_init(&se_sess->sess_acl_list);
                /*
                 * If the session list is empty, then clear the pointer.
                 * Otherwise, set the struct se_session pointer from the tail
@@@ -2195,7 -2173,7 +2173,7 @@@ queue_full
        transport_handle_queue_full(cmd, cmd->se_dev);
  }
  
 -static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
 +void target_free_sgl(struct scatterlist *sgl, int nents)
  {
        struct scatterlist *sg;
        int count;
  
        kfree(sgl);
  }
 +EXPORT_SYMBOL(target_free_sgl);
  
  static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
  {
  static inline void transport_free_pages(struct se_cmd *cmd)
  {
        if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
 -              transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
 +              target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
                cmd->t_prot_sg = NULL;
                cmd->t_prot_nents = 0;
        }
                 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
                 */
                if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
 -                      transport_free_sgl(cmd->t_bidi_data_sg,
 +                      target_free_sgl(cmd->t_bidi_data_sg,
                                           cmd->t_bidi_data_nents);
                        cmd->t_bidi_data_sg = NULL;
                        cmd->t_bidi_data_nents = 0;
        }
        transport_reset_sgl_orig(cmd);
  
 -      transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
 +      target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
        cmd->t_data_sg = NULL;
        cmd->t_data_nents = 0;
  
 -      transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 +      target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
        cmd->t_bidi_data_sg = NULL;
        cmd->t_bidi_data_nents = 0;
  }
@@@ -2325,22 -2302,20 +2303,22 @@@ EXPORT_SYMBOL(transport_kunmap_data_sg)
  
  int
  target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
 -               bool zero_page)
 +               bool zero_page, bool chainable)
  {
        struct scatterlist *sg;
        struct page *page;
        gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
 -      unsigned int nent;
 +      unsigned int nalloc, nent;
        int i = 0;
  
 -      nent = DIV_ROUND_UP(length, PAGE_SIZE);
 -      sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
 +      nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
 +      if (chainable)
 +              nalloc++;
 +      sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
        if (!sg)
                return -ENOMEM;
  
 -      sg_init_table(sg, nent);
 +      sg_init_table(sg, nalloc);
  
        while (length) {
                u32 page_len = min_t(u32, length, PAGE_SIZE);
@@@ -2364,7 -2339,6 +2342,7 @@@ out
        kfree(sg);
        return -ENOMEM;
  }
 +EXPORT_SYMBOL(target_alloc_sgl);
  
  /*
   * Allocate any required resources to execute the command.  For writes we
@@@ -2380,7 -2354,7 +2358,7 @@@ transport_generic_new_cmd(struct se_cm
        if (cmd->prot_op != TARGET_PROT_NORMAL &&
            !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
                ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
 -                                     cmd->prot_length, true);
 +                                     cmd->prot_length, true, false);
                if (ret < 0)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
  
                        ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
                                               &cmd->t_bidi_data_nents,
 -                                             bidi_length, zero_flag);
 +                                             bidi_length, zero_flag, false);
                        if (ret < 0)
                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
  
                ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
 -                                     cmd->data_length, zero_flag);
 +                                     cmd->data_length, zero_flag, false);
                if (ret < 0)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
  
                ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
                                       &cmd->t_bidi_data_nents,
 -                                     caw_length, zero_flag);
 +                                     caw_length, zero_flag, false);
                if (ret < 0)
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
@@@ -50,10 -50,6 +50,6 @@@ struct target_core_fabric_ops 
         */
        int (*check_stop_free)(struct se_cmd *);
        void (*release_cmd)(struct se_cmd *);
-       /*
-        * Called with spin_lock_bh(struct se_portal_group->session_lock held.
-        */
-       int (*shutdown_session)(struct se_session *);
        void (*close_session)(struct se_session *);
        u32 (*sess_get_index)(struct se_session *);
        /*
@@@ -123,8 -119,6 +119,6 @@@ void       __transport_register_session(struc
                struct se_node_acl *, struct se_session *, void *);
  void  transport_register_session(struct se_portal_group *,
                struct se_node_acl *, struct se_session *, void *);
- int   target_get_session(struct se_session *);
- void  target_put_session(struct se_session *);
  ssize_t       target_show_dynamic_sessions(struct se_portal_group *, char *);
  void  transport_free_session(struct se_session *);
  void  target_put_nacl(struct se_node_acl *);
@@@ -185,10 -179,6 +179,10 @@@ int      core_tpg_set_initiator_node_tag(str
  int   core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
  int   core_tpg_deregister(struct se_portal_group *);
  
 +int   target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
 +              u32 length, bool zero_page, bool chainable);
 +void  target_free_sgl(struct scatterlist *sgl, int nents);
 +
  /*
   * The LIO target core uses DMA_TO_DEVICE to mean that data is going
   * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean