/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
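/*
 * Each CQ is shared by up to ISCSI_ISER_MAX_CONN connections, so the CQ
 * lengths above are sized for the worst case in which every connection
 * has its maximal number of receive/request DTOs outstanding at once,
 * making a CQ overrun impossible.
 */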
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
static int iser_drain_tx_cq(struct iser_device *device, int cq_index);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		 event->device->name, event->element.port_num);
}
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct iser_cq_desc *cq_desc;
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles - based on FMR support */
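	/*
	 * The registration strategy is fixed once per device: FMR when the
	 * verbs provider implements the full set of FMR callbacks, FastReg
	 * work requests when the device advertises
	 * IB_DEVICE_MEM_MGT_EXTENSIONS. All later per-connection pool
	 * allocation, registration and teardown go through these handles.
	 */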
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}
	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
	iser_info("using %d CQs, device %s supports %d vectors\n",
		  device->cqs_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors);

	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
				  GFP_KERNEL);
	if (device->cq_desc == NULL)
		goto cq_desc_err;
	cq_desc = device->cq_desc;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device   = device;
		cq_desc[i].cq_index = i;

		device->rx_cq[i] = ib_create_cq(device->ib_device,
						iser_cq_callback,
						iser_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->rx_cq[i])) {
			device->rx_cq[i] = NULL;
			goto cq_err;
		}

		device->tx_cq[i] = ib_create_cq(device->ib_device,
						NULL, iser_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->tx_cq[i])) {
			device->tx_cq[i] = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&device->cq_tasklet[i],
			     iser_cq_tasklet_fn,
			     (unsigned long)&cq_desc[i]);
	}
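	/*
	 * Only the RX CQs are armed for interrupts; the TX CQs are created
	 * with a NULL completion handler and are emptied solely by polling
	 * (iser_drain_tx_cq) from the per-CQ tasklet.
	 */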
	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->cqs_used; i++)
		tasklet_kill(&device->cq_tasklet[i]);
cq_err:
	for (i = 0; i < device->cqs_used; i++) {
		if (device->tx_cq[i])
			ib_destroy_cq(device->tx_cq[i]);
		if (device->rx_cq[i])
			ib_destroy_cq(device->rx_cq[i]);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->cq_desc);
cq_desc_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->cqs_used; i++) {
		tasklet_kill(&device->cq_tasklet[i]);
		(void)ib_destroy_cq(device->tx_cq[i]);
		(void)ib_destroy_cq(device->rx_cq[i]);
		device->tx_cq[i] = NULL;
		device->rx_cq[i] = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->cq_desc);

	device->mr = NULL;
	device->pd = NULL;
}
/**
 * iser_create_fmr_pool - Creates FMR pool and page_vec
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);
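	/*
	 * The page array lives in the same allocation, right after the
	 * struct iser_page_vec header, so one kmalloc/kfree covers both
	 * the descriptor and its ISCSI_ISER_SG_TABLESIZE + 1 entries.
	 */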
	params.page_shift = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50% */
	params.pool_size = cmds_max * 2;
	params.dirty_watermark = cmds_max;
	params.cache = 0;
	params.flush_function = NULL;
	params.access = (IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}
/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;
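	/*
	 * For T10-PI enabled connections each descriptor also carries a
	 * fast-reg MR for the protection (DIF) buffer and a signature
	 * enabled MR that links the data and protection registrations.
	 */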
	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;

sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}
/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}
/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}
/**
 * iser_create_ib_conn_res - creates the connection Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_device *device;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	ib_conn->cq_index = min_index;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq[min_index];
	init_attr.recv_cq	= device->rx_cq[min_index];
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
	} else {
		init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}
/**
 * Based on the resolved device node GUID, see if a matching iser device
 * was already allocated. If not, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign this IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}
/**
 * Called with state mutex held
 */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy_device: indicator if we need to try to release
 *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
 *     will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy_device)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	iser_free_rx_descriptors(iser_conn);

	if (ib_conn->qp != NULL) {
		ib_conn->device->cq_active_qps[ib_conn->cq_index]--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy_device && device != NULL) {
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
}
/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	BUG_ON(iser_conn->state != ISER_CONN_DOWN);
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}
/**
 * iser_poll_for_flush_errors - Don't settle for less than all.
 * @ib_conn: IB context of the connection
 *
 * This routine is called when the QP is in error state.
 * It polls the send CQ until all flush errors are consumed and
 * returns when all flush errors were processed.
 */
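/*
 * Note: polling (with a short sleep between iterations) is used here
 * rather than waiting for interrupts, since once the QP has moved to
 * the error state the TX CQ is emptied only by explicit ib_poll_cq
 * calls from iser_drain_tx_cq().
 */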
static void iser_poll_for_flush_errors(struct ib_conn *ib_conn)
{
	struct iser_device *device = ib_conn->device;
	int count = 0;

	while (ib_conn->post_recv_buf_count > 0 ||
	       atomic_read(&ib_conn->post_send_buf_count) > 0) {
		msleep(100);
		if (atomic_read(&ib_conn->post_send_buf_count) > 0)
			iser_drain_tx_cq(device, ib_conn->cq_index);

		count++;
		/* Don't flood with prints */
		if (count % 30 == 0)
			iser_dbg("post_recv %d post_send %d",
				 ib_conn->post_recv_buf_count,
				 atomic_read(&ib_conn->post_send_buf_count));
	}
}
/**
 * Triggers start of the disconnect procedures and waits for them to be done.
 * Called with state mutex held.
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		iser_poll_for_flush_errors(ib_conn);
	}

	return 1;
}
/**
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_DOWN;
}
/**
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}
/**
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;
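	/*
	 * retry_count 7 is the transport maximum; rnr_retry_count is
	 * deliberately 6 rather than 7, since an rnr_retry_count of 7
	 * means infinite RNR retries and a stuck responder would then
	 * hang the connection forever.
	 */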
	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data	    = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
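	/*
	 * Completing up_completion releases iser_connect(), which has
	 * been sleeping on it since issuing rdma_resolve_addr().
	 */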
	complete(&iser_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy_device)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy_device);
	complete(&iser_conn->ib_completion);
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		iser_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also implicitly destroy the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		iser_conn->ib_conn.cma_id = NULL;
		ret = 1;
		break;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);
	return ret;
}
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	atomic_set(&iser_conn->ib_conn.post_send_buf_count, 0);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}
/**
 * Starts the process of connecting to the target and
 * sleeps until the connection is established or rejected.
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}
/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64 io_addr;
	u64 *page_list;
	int status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_list,
				   page_vec->length,
				   io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_mr = 1;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;
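	/*
	 * The FMR maps whole 4K pages, so va/len are narrowed here by the
	 * intra-page offset of the first element and the true data size
	 * of the transfer.
	 */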
1024 iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
1025 "entry[0]: (0x%08lx,%ld)] -> "
1026 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
1027 page_vec, page_vec->length,
1028 (unsigned long)page_vec->pages[0],
1029 (unsigned long)page_vec->data_size,
1030 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
1031 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->is_mr)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *iser_conn = iser_task->iser_conn;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!reg->is_mr)
		return;

	reg->mem_h = NULL;
	reg->is_mr = 0;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
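		/*
		 * qp_max_recv_dtos is a power of two, so masking with
		 * qp_max_recv_dtos_mask wraps the head index around the
		 * receive descriptor ring without a division.
		 */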
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}
/**
 * iser_handle_comp_error() - Handle error completion
 * @desc:      iser TX descriptor
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct iser_tx_desc *desc,
		       struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);
}
static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
{
	struct ib_cq *cq = device->tx_cq[cq_index];
	struct ib_wc wc;
	struct iser_tx_desc *tx_desc;
	struct ib_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			if (wc.wr_id != ISER_FASTREG_LI_WRID) {
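				/*
				 * Fast-reg local invalidate WRs carry the
				 * reserved ISER_FASTREG_LI_WRID and were
				 * never accounted in post_send_buf_count,
				 * so only regular sends are decremented
				 * and error-handled here.
				 */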
				atomic_dec(&ib_conn->post_send_buf_count);
				iser_handle_comp_error(tx_desc, ib_conn, &wc);
			}
		}
		completed_tx++;
	}
	return completed_tx;
}
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
	struct iser_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq *cq = device->rx_cq[cq_index];
	struct ib_wc wc;
	struct iser_rx_desc *desc;
	unsigned long xfer_len;
	struct ib_conn *ib_conn;
	int completed_tx, completed_rx = 0;

	/* First do tx drain, so in a case where we have rx flushes and a successful
	 * tx completion we will still go through completion error handling.
	 */
	completed_tx = iser_drain_tx_cq(device, cq_index);
	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					 wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn, &wc);
		}
		completed_rx++;
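		/*
		 * Drain the TX CQ every 64 RX completions so a long RX
		 * burst cannot starve send completions; the TX CQ has no
		 * interrupt handler of its own and is emptied only by
		 * polling.
		 */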
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device, cq_index);
	}

	/*
	 * It is assumed here that arming the CQ only once it is empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
	struct iser_device *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;

	tasklet_schedule(&device->cq_tasklet[cq_index]);
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}
		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;
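			/*
			 * With DIF, each sector on the wire is followed by
			 * an 8-byte protection tuple, so dividing the
			 * reported byte offset by sector_size + 8 recovers
			 * the index of the failing sector.
			 */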
			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}