/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"
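
/*
 * iser_free_bounce_sg() - Free the pages of a bounce buffer and restore
 * the task's original scatterlist. Counterpart of iser_alloc_bounce_sg().
 */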
static void
iser_free_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(data->sg, sg, data->size, count)
		__free_page(sg_page(sg));

	kfree(data->sg);

	data->sg = data->orig_sg;
	data->size = data->orig_size;
	data->orig_sg = NULL;
	data->orig_size = 0;
}
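
/*
 * iser_alloc_bounce_sg() - Allocate a page-per-entry bounce scatterlist
 * large enough for the task's data length, saving the original SG so it
 * can be restored on free. Returns 0 on success or -ENOMEM.
 */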
static int
iser_alloc_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned long length = data->data_len;
	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);

	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		goto err;

	sg_init_table(sg, nents);
	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto err;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	data->orig_sg = data->sg;
	data->orig_size = data->size;
	data->sg = sg;
	data->size = nents;

	return 0;

err:
	for (; i > 0; i--)
		__free_page(sg_page(&sg[i - 1]));
	kfree(sg);

	return -ENOMEM;
}
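
/*
 * iser_copy_bounce() - Copy between the original scatterlist and the
 * bounce buffer; to_buffer selects the copy direction. Walks both SG
 * lists in lockstep, mapping one page at a time with kmap_atomic().
 */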
static void
iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
{
	struct scatterlist *osg, *bsg = data->sg;
	void *oaddr, *baddr;
	unsigned int left = data->data_len;
	unsigned int bsg_off = 0;
	int i;

	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
		unsigned int copy_len, osg_off = 0;

		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
		copy_len = min(left, osg->length);
		while (copy_len) {
			unsigned int len = min(copy_len, bsg->length - bsg_off);

			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
			if (to_buffer)
				memcpy(baddr + bsg_off, oaddr + osg_off, len);
			else
				memcpy(oaddr + osg_off, baddr + bsg_off, len);

			kunmap_atomic(baddr - bsg->offset);
			osg_off += len;
			bsg_off += len;
			copy_len -= len;

			if (bsg_off >= bsg->length) {
				bsg = sg_next(bsg);
				bsg_off = 0;
			}
		}
		kunmap_atomic(oaddr - osg->offset);
		left -= osg_off;
	}
}

static inline void
iser_copy_from_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, false);
}

static inline void
iser_copy_to_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, true);
}
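
/*
 * iser_reg_desc_get()/iser_reg_desc_put() - Take and return a fast
 * registration descriptor from the connection's pool under ib_conn->lock.
 * The pool is expected to be non-empty when iser_reg_desc_get() is called.
 */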
struct fast_reg_descriptor *
iser_reg_desc_get(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc;
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	desc = list_first_entry(&ib_conn->fastreg.pool,
				struct fast_reg_descriptor, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&ib_conn->lock, flags);

	return desc;
}

void
iser_reg_desc_put(struct ib_conn *ib_conn,
		  struct fast_reg_descriptor *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&ib_conn->lock, flags);
	list_add(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_irqrestore(&ib_conn->lock, flags);
}

/**
 * iser_start_rdma_unaligned_sg - Replace an RDMA-unaligned scatterlist
 * with a bounce buffer: allocate it, copy the data into it for writes,
 * and DMA map it.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
	int rc;

	rc = iser_alloc_bounce_sg(data);
	if (rc) {
		iser_err("Failed to allocate bounce for data len %lu\n",
			 data->data_len);
		return rc;
	}

	if (cmd_dir == ISER_DIR_OUT)
		iser_copy_to_bounce(data);

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
					(cmd_dir == ISER_DIR_OUT) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!data->dma_nents) {
		iser_err("Got dma_nents %d, something went wrong...\n",
			 data->dma_nents);
		rc = -ENOMEM;
		goto err;
	}

	return 0;
err:
	iser_free_bounce_sg(data);
	return rc;
}

/**
 * iser_finalize_rdma_unaligned_sg - Tear down a bounce buffer set up by
 * iser_start_rdma_unaligned_sg: DMA unmap it, copy the data back to the
 * original scatterlist for reads, and free it.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

	ib_dma_unmap_sg(dev, data->sg, data->size,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN)
		iser_copy_from_bounce(data);

	iser_free_bounce_sg(data);
}

#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where --few fragments of the same page-- are present in the SG as
 * consecutive elements. Also, it handles one entry SG.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers which is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = data->sg;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i + 1;

	if (unlikely(ret_len != data->dma_nents))
		iser_warn("rdma alignment violation (%d/%d aligned)\n",
			  ret_len, data->dma_nents);

	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}
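
/*
 * iser_dma_map_task_data() - DMA map the task's scatterlist and record
 * the transfer direction. Returns 0, or -EINVAL if the mapping failed.
 */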
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}
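
/*
 * iser_reg_dma() - Describe a single-entry scatterlist with the device's
 * global DMA memory region; no registration work request is needed.
 */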
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->mr->lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}
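
/*
 * fall_to_bounce_buf() - Switch an RDMA-unaligned task buffer to a bounce
 * buffer: unmap the original scatterlist and set up a DMA-mapped copy via
 * iser_start_rdma_unaligned_sg().
 */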
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *mem,
			      enum iser_data_dir cmd_dir)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;

	iscsi_conn->fmr_unalign_cnt++;

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, device->ib_device);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem,
				 (cmd_dir == ISER_DIR_OUT) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* allocate a copy buffer; if we are writing, copy the
	 * unaligned scatterlist into it, then dma map the copy */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg *mem_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	mem_reg->sge.lkey = fmr->fmr->lkey;
	mem_reg->rkey = fmr->fmr->rkey;
	mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	mem_reg->sge.length = page_vec->data_size;
	mem_reg->mem_h = fmr;

	return 0;
}

/**
 * Unregister memory previously registered using FMR.
 * Does nothing if the memory was not FMR-registered.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
			  reg->mem_h);
	reg->mem_h = NULL;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg;
	int aligned_len;
	int err;
	int i;

	mem_reg = &iser_task->rdma_reg[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		return iser_reg_dma(device, mem, mem_reg);
	} else { /* use FMR for multiple dma entries */
		err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
					mem_reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}
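
/*
 * iser_set_dif_domain() - Fill a T10-DIF signature domain from the SCSI
 * command's protection information settings.
 */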
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}
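
/*
 * iser_set_sig_attrs() - Translate the SCSI protection operation into
 * wire/memory signature domains. Returns -EINVAL for unsupported ops.
 */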
static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}
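
/*
 * iser_inv_rkey() - Build a local-invalidate work request for the MR and
 * advance its rkey so the next registration uses a fresh key.
 */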
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
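
/*
 * iser_reg_sig_mr() - Post a REG_SIG_MR work request binding the data
 * (and optional protection) registrations to the signature MR, preceded
 * by a local invalidate when the MR needs a fresh key.
 */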
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);

	if (!pi_ctx->sig_mr_valid) {
		iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &data_reg->sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	pi_ctx->sig_mr_valid = 0;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}
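
/*
 * iser_fast_reg_mr() - Register a multi-entry scatterlist with a fast
 * registration work request; falls back to the global DMA MR for a
 * single-entry SG.
 */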
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, offset, size, plen;

	/* if there is a single dma entry, dma mr suffices */
	if (mem->dma_nents == 1)
		return iser_reg_dma(device, mem, reg);

	mr = rsc->mr;
	frpl = rsc->frpl;

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!rsc->mr_valid) {
		iser_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = frpl->page_list[0] + offset;
	reg->sge.length = size;

	return ret;
}

/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		desc = iser_reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, mem,
			       desc ? &desc->rsc : NULL, mem_reg);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg prot_reg;

		memset(&prot_reg, 0, sizeof(prot_reg));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, mem,
							 cmd_dir);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
			}

			err = iser_fast_reg_mr(iser_task, mem,
					       &desc->pi_ctx->rsc, &prot_reg);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc->pi_ctx, mem_reg,
				      &prot_reg, mem_reg);
		if (err) {
			iser_err("Failed to register signature mr\n");
			goto err_reg;
		}
		desc->pi_ctx->sig_protected = 1;
	}

	return 0;
err_reg:
	if (desc)
		iser_reg_desc_put(ib_conn, desc);

	return err;
}