/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *rpcrdma_receive_wq;
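/* Replies are not parsed in the Receive completion handler itself.
 * Each completed Receive is queued to this workqueue so that the
 * RPC/RDMA header can be processed by rpcrdma_receive_worker() in
 * process context.
 */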
int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}
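/* Tear down the receive workqueue. Any work items still queued are
 * flushed by destroy_workqueue() before it returns.
 */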
void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}
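/* QP event handler: log the asynchronous error and, if the transport
 * believed it was connected, mark the endpoint failed and wake anyone
 * waiting on the connection state.
 */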
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("RPC: %s: %s on device %s ep %p\n",
	       __func__, ib_event_msg(event->event),
	       event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}
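/* Process-context handler for a completed Receive: parse the RPC/RDMA
 * header and hand the reply to the RPC layer.
 */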
static void
rpcrdma_receive_worker(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);

	rpcrdma_reply_handler(rep);
}
/* Perform basic sanity checking to avoid using garbage
 * to update the credit grant value.
 */
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
	struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
	u32 credits;

	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		return;

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buffer->rb_max_requests)
		credits = buffer->rb_max_requests;

	atomic_set(&buffer->rb_credits, credits);
}
/**
 * rpcrdma_receive_wc - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_receive_wc(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);

	rpcrdma_update_granted_credits(rep);

out_schedule:
	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rep->rr_len = RPCRDMA_BAD_LEN;
	goto out_schedule;
}
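/* Drain completions from the receive CQ, invoking the normal Receive
 * completion handler for each flushed WC.
 */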
static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	struct ib_wc wc;

	while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
		rpcrdma_receive_wc(NULL, &wc);
}
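/* CM event handler: records the result of address and route resolution,
 * and translates connection-state changes into rep_connected values that
 * the connect and disconnect paths wait on.
 */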
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		complete(&ia->ri_done);
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
		complete(&ia->ri_done);
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
		complete(&ia->ri_done);
	case RDMA_CM_EVENT_ESTABLISHED:
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
		dprintk("RPC: %s: %d responder resources"
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("RPC: %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		atomic_set(&xprt->rx_buf.rb_credits, 1);
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		dprintk("RPC: %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	module_put(id->device->owner);
	rdma_destroy_id(id);
}
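/* Create and address-resolve a connection identifier for the transport.
 * Resolution results are reported asynchronously via rpcrdma_conn_upcall(),
 * so this waits on ia->ri_done and then checks ia->ri_async_rc.
 */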
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
		  struct rpcrdma_ia *ia, struct sockaddr *addr)
	struct rdma_cm_id *id;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
		dprintk("RPC: %s: rdma_create_id() failed %i\n",

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
	wait_for_completion_interruptible_timeout(&ia->ri_done,
		msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC: %s: Failed to get device module\n",
		ia->ri_async_rc = -ENODEV;
	rc = ia->ri_async_rc;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
	wait_for_completion_interruptible_timeout(&ia->ri_done,
		msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;

	module_put(id->device->owner);
/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}
/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection domain (PD).
 */
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
	struct rpcrdma_ia *ia = &xprt->rx_ia;

	ia->ri_dma_mr = NULL;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC: %s: ib_alloc_pd() failed %i\n",

	if (memreg == RPCRDMA_FRMR) {
		if (!(ia->ri_device->attrs.device_cap_flags &
		      IB_DEVICE_MEM_MGT_EXTENSIONS) ||
		    (ia->ri_device->attrs.max_fast_reg_page_list_len == 0)) {
			dprintk("RPC: %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_device->alloc_fmr) {
			dprintk("RPC: %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
	case RPCRDMA_ALLPHYSICAL:
		ia->ri_ops = &rpcrdma_physical_memreg_ops;
	case RPCRDMA_MTHCAFMR:
		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
		printk(KERN_ERR "RPC: Unsupported memory "
		       "registration mode: %d\n", memreg);
	dprintk("RPC: %s: memory registration strategy is '%s'\n",
		__func__, ia->ri_ops->ro_displayname);
	rwlock_init(&ia->ri_qplock);

	ib_dealloc_pd(ia->ri_pd);
	rpcrdma_destroy_id(ia->ri_id);
/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}
/*
 * Create unconnected endpoint.
 */
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_qp_wr;

	if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) {
		dprintk("RPC: %s: insufficient sge's available\n",

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC: %s: insufficient wqe's available\n",
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* always signal? */

	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	ep->rep_remote_cma.responder_resources =
		ia->ri_device->attrs.max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	ib_dereg_mr(ia->ri_dma_mr);
/*
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	rpcrdma_ep_disconnect(ep, ia);

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rpcrdma_clean_cq(ep->rep_attr.send_cq);

	rdma_destroy_qp(ia->ri_id);
	ia->ri_id->qp = NULL;

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);

	rc = ib_dereg_mr(ia->ri_dma_mr);
	dprintk("RPC: %s: ib_dereg_mr returned %i\n",
/*
 * Connect unconnected endpoint.
 */
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
	struct rdma_cm_id *id, *old;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;

		dprintk("RPC: %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);

		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC: %s: can't reconnect on "
			       "different device!\n", __func__);
			rpcrdma_destroy_id(id);

		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
			rpcrdma_destroy_id(id);

		write_lock(&ia->ri_qplock);
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);

		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
			/* do not update ep->rep_connected */

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
		dprintk("RPC: %s: rdma_connect() failed with %i\n",

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

	/*
	 * Check state. A non-peer reject indicates no listener
	 * (ECONNREFUSED), which may be a transient state. All
	 * others indicate a transport condition which has already
	 * undergone best-effort recovery.
	 */
	if (ep->rep_connected == -ECONNREFUSED &&
	    ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
		dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
	if (ep->rep_connected <= 0) {
		/* Sometimes, the only way to reliably connect to remote
		 * CMs is to use same nonzero values for ORD and IRD. */
		if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
		    (ep->rep_remote_cma.responder_resources == 0 ||
		     ep->rep_remote_cma.initiator_depth !=
		     ep->rep_remote_cma.responder_resources)) {
			if (ep->rep_remote_cma.responder_resources == 0)
				ep->rep_remote_cma.responder_resources = 1;
			ep->rep_remote_cma.initiator_depth =
				ep->rep_remote_cma.responder_resources;
		rc = ep->rep_connected;
	} else {
		struct rpcrdma_xprt *r_xprt;

		dprintk("RPC: %s: connected\n", __func__);

		r_xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		extras = r_xprt->rx_buf.rb_bc_srv_max_requests;

		rc = rpcrdma_ep_post_extra_recv(r_xprt, extras);
			pr_warn("%s: rpcrdma_ep_post_extra_recv: %i\n",

	ep->rep_connected = rc;
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
}
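/* Allocate one rpcrdma_req and track it on the transport's rb_allreqs
 * list so that it can be found and released at transport teardown.
 */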
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&req->rl_free);
	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_cqe.done = rpcrdma_wc_send;
	req->rl_buffer = &r_xprt->rx_buf;
	return req;
}
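/* Allocate one rpcrdma_rep, including the DMA-mapped buffer that will
 * receive an inbound RPC/RDMA message (sized to the inline receive
 * threshold).
 */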
static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
					       GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_cqe.done = rpcrdma_receive_wc;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}
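/* Create the send and receive buffer pools for a transport: one
 * rpcrdma_req per credit (rb_max_requests), and a slightly larger
 * number of rpcrdma_reps for posted Receives.
 */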
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_lock);
	atomic_set(&buf->rb_credits, 1);

	rc = ia->ri_ops->ro_init(r_xprt);
	if (rc)
		goto out;

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		req->rl_backchannel = false;
		list_add(&req->rl_free, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i < buf->rb_max_requests + 2; i++) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC: %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		list_add(&rep->rr_list, &buf->rb_recv_bufs);
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
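/* List helpers for the send and receive buffer pools. Callers hold
 * rb_lock and check list_empty() before taking an entry.
 */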
static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_free);
	list_del(&req->rl_free);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}
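/* Release the DMA-mapped buffers owned by an rpcrdma_rep or rpcrdma_req,
 * then free the structure itself.
 */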
static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
	kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}
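/* Tear down the buffer pools. Requests are walked via rb_allreqs so that
 * every allocated rpcrdma_req is found, whether or not it is currently on
 * the send-buffer free list.
 */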
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(ia, rep);
	}

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(ia, req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);

	ia->ri_ops->ro_destroy(buf);
}
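/* Get an unused MW from the per-transport rb_mws pool, or return one when
 * the caller is finished with it. A NULL return means the pool is empty.
 */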
struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws)) {
		mw = list_first_entry(&buf->rb_mws,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);
	}
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		pr_err("RPC: %s: no MWs available\n", __func__);
	return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	list_add_tail(&mw->mw_list, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}
/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	req = rpcrdma_buffer_get_req_locked(buffers);
	if (list_empty(&buffers->rb_recv_bufs))
		goto out_repbuf;
	req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC: %s: out of request buffers\n", __func__);
	return NULL;
out_repbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC: %s: out of reply buffers\n", __func__);
	req->rl_reply = NULL;
	return req;
}
/*
 * Put request/reply buffers back into pool.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
	if (rep)
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}
/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	if (!list_empty(&buffers->rb_recv_bufs))
		req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}
/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
	dprintk("RPC: map_one: offset %p iova %llx len %zu\n",
		seg->mr_offset,
		(unsigned long long)seg->mr_dma, seg->mr_dmalen);
}
/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	struct ib_sge *iov;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		goto out;

	iov = &rb->rg_iov;
	iov->addr = ib_dma_map_single(ia->ri_device,
				      (void *)rb->rg_base, size,
				      DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_device, iov->addr))
		goto out_free;

	iov->length = size;
	iov->lkey = ia->ri_pd->local_dma_lkey;
	rb->rg_size = size;
	rb->rg_owner = NULL;
	return rb;

out_free:
	kfree(rb);
out:
	return ERR_PTR(-ENOMEM);
}
/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_sge *iov;

	if (!rb)
		return;

	iov = &rb->rg_iov;
	ib_dma_unmap_single(ia->ri_device,
			    iov->addr, iov->length, DMA_BIDIRECTIONAL);
	kfree(rb);
}
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_device *device = ia->ri_device;
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	struct ib_sge *iov = req->rl_send_iov;
	int i, rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_cqe = &req->rl_cqe;
	send_wr.sg_list = iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;

	for (i = 0; i < send_wr.num_sge; i++)
		ib_dma_sync_single_for_device(device, iov[i].addr,
					      iov[i].length, DMA_TO_DEVICE);
	dprintk("RPC: %s: posting %d s/g entries\n",
		__func__, send_wr.num_sge);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}
/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_cqe = &rep->rr_cqe;
	recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rdmab_length(rep->rr_rdmabuf),
				   DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
	if (rc)
		dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}
/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}
/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	int bytes, segments;

	bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
	bytes -= RPCRDMA_HDRLEN_MIN;
	if (bytes < sizeof(struct rpcrdma_segment) * 2) {
		pr_warn("RPC: %s: inline threshold too small\n",
			__func__);
		return 0;
	}

	segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
	dprintk("RPC: %s: max chunk list size = %d segments\n",
		__func__, segments);
	return segments;
}