/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
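
/*
 * Tunables: defaults for the request slot table size, the maximum
 * inline read and write sizes, inline write padding, and the memory
 * registration strategy (FRMR by default).
 */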
static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 1;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif
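
/*
 * When CONFIG_SUNRPC_DEBUG is enabled, the table above is registered
 * at module load time and the tunables appear under /proc/sys/sunrpc/.
 */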

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}
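
/*
 * Establish (or re-establish) the RDMA connection from workqueue
 * context, then wake any RPC tasks waiting on the result.
 */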
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc = 0;

	xprt_clear_connected(xprt);

	dprintk("RPC: %s: %sconnect\n", __func__,
		r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}
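
/*
 * Fault injection hook: force a disconnect on the underlying RDMA
 * connection so that recovery paths can be exercised.
 */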
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
						   rx_xprt);

	pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
	rdma_disconnect(r_xprt->rx_ia.ri_id);
}

/*
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	xprt_clear_connected(xprt);

	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}

static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr *sap;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	sap = (struct sockaddr *)&cdata.addr;
	memcpy(sap, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, sap, xprt->addrlen);

	if (rpc_get_port(sap))
		xprt_set_bound(xprt);

	cdata.max_requests = xprt->max_reqs;

	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 * ia, endpoint, and buffers.
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, sap, xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
			       &new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt_rdma_format_addresses(xprt, sap);
	xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
	if (xprt->max_payload == 0)
		goto out4;
	xprt->max_payload <<= PAGE_SHIFT;
	dprintk("RPC: %s: transport data payload maximum: %zu bytes\n",
		__func__, xprt->max_payload);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	dprintk("RPC: %s: %s:%s\n", __func__,
		xprt->address_strings[RPC_DISPLAY_ADDR],
		xprt->address_strings[RPC_DISPLAY_PORT]);
	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}
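
/*
 * Update the port number in both the rpc_xprt's cached server address
 * and the RDMA connect address, keeping the two copies consistent.
 */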
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}
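
/*
 * Kick off a (re)connect. Reconnect attempts are delayed by
 * xprt->reestablish_timeout, which doubles on each attempt and is
 * clamped to [RPCRDMA_INIT_REEST_TO, RPCRDMA_MAX_REEST_TO]. An
 * initial connect is scheduled immediately, and synchronous tasks
 * wait for it to complete.
 */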
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rx_connect_worker,
				      xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
		else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	} else {
		schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rx_connect_worker);
	}
}

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual
 * send/recv sequence.
 *
 * The RPC layer allocates both send and receive buffers in the same call
 * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
 * We may register rq_rcv_buf when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t min_size;
	gfp_t flags;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		return NULL;

	flags = RPCRDMA_DEF_GFP;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
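
	/* Fast path: if the rpcrdma_req already carries registered
	 * buffers large enough for this request, reuse them. Otherwise
	 * fall through to the labels below to allocate and register
	 * fresh buffers (the "hardway" path).
	 */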
	if (req->rl_rdmabuf == NULL)
		goto out_rdmabuf;
	if (req->rl_sendbuf == NULL)
		goto out_sendbuf;
	if (size > req->rl_sendbuf->rg_size)
		goto out_sendbuf;

out:
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_sendbuf->rg_base;

out_rdmabuf:
	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

out_sendbuf:
	/* XDR encoding and RPC/RDMA marshaling of this request has not
	 * yet occurred. Thus a lower bound is needed to prevent buffer
	 * overrun during marshaling.
	 *
	 * RPC/RDMA marshaling may choose to send payload bearing ops
	 * inline, if the result is smaller than the inline threshold.
	 * The value of the "size" argument accounts for header
	 * requirements but not for the payload in these cases.
	 *
	 * Likewise, allocate enough space to receive a reply up to the
	 * size of the inline threshold.
	 *
	 * It's unlikely that both the send header and the received
	 * reply will be large, but slush is provided here to allow
	 * flexibility when marshaling.
	 */
	min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
	min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	if (size < min_size)
		size = min_size;

	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;

	r_xprt->rx_stats.hardway_register_count += size;
	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
	req->rl_sendbuf = rb;
	goto out;

out_fail:
	rpcrdma_buffer_put(req);
	r_xprt->rx_stats.failed_marshal_count++;
	return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_regbuf *rb;
	int i;

	if (buffer == NULL)
		return;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
	req = rb->rg_owner;
	if (req->rl_backchannel)
		return;

	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);

	dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);

	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						    &req->rl_segments[i]);
	}

	rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1. Marshal the RPC request into an RPC RDMA request, which means
 *     putting a header in front of data, and creating IOVs for RDMA
 *     from those in the request.
 *  2. In marshaling, detect opportunities for RDMA, and use them.
 *  3. Post a recv message to set up asynch completion, then send
 *     the request (rpcrdma_ep_post).
 *  4. No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */
static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

	rc = rpcrdma_marshal_req(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (req->rl_reply == NULL)		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

failed_marshal:
	r_xprt->rx_stats.failed_marshal_count++;
	dprintk("RPC: %s: rpcrdma_marshal_req failed, status %i\n",
		__func__, rc);
	if (rc == -EIO)
		return -EIO;
drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}
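
/*
 * Emit the generic rpc_xprt counters on one line, then the
 * rpcrdma-specific counters on a second line (the format consumed
 * by mountstats).
 */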
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_puts(seq, "\txprt:\trdma ");
	seq_printf(seq, "%u %lu %lu %lu %ld %lu %lu %lu %llu %llu ",
		   0,	/* need a local port? */
		   xprt->stat.bind_count,
		   xprt->stat.connect_count,
		   xprt->stat.connect_time,
		   idle_time,
		   xprt->stat.sends,
		   xprt->stat.recvs,
		   xprt->stat.bad_xids,
		   xprt->stat.req_u,
		   xprt->stat.bklog_u);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %lu %lu %lu %lu\n",
		   r_xprt->rx_stats.read_chunk_count,
		   r_xprt->rx_stats.write_chunk_count,
		   r_xprt->rx_stats.reply_chunk_count,
		   r_xprt->rx_stats.total_rdma_request,
		   r_xprt->rx_stats.total_rdma_reply,
		   r_xprt->rx_stats.pullup_copy_count,
		   r_xprt->rx_stats.fixup_copy_count,
		   r_xprt->rx_stats.hardway_register_count,
		   r_xprt->rx_stats.failed_marshal_count,
		   r_xprt->rx_stats.bad_reply_count,
		   r_xprt->rx_stats.nomsg_call_count);
}
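
/*
 * Swap hooks: no transport-specific setup is performed here; memory
 * needed by swap-out tasks is instead requested with __GFP_MEMALLOC
 * at allocation time (see xprt_rdma_allocate above).
 */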
static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong, /* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def, /* ditto */
	.rpcbind		= rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect,
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	.bc_setup		= xprt_rdma_bc_setup,
	.bc_up			= xprt_rdma_bc_up,
	.bc_free_rqst		= xprt_rdma_bc_free_rqst,
	.bc_destroy		= xprt_rdma_bc_destroy,
#endif
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};
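
/*
 * Module unload: unregister the sysctl table and both transports,
 * and destroy the workqueues created by xprt_rdma_init().
 */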
void xprt_rdma_cleanup(void)
{
	int rc;

	dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);

	rpcrdma_destroy_wq();
	frwr_destroy_recovery_wq();

	rc = xprt_unregister_transport(&xprt_rdma_bc);
	if (rc)
		dprintk("RPC: %s: xprt_unregister(bc) returned %i\n",
			__func__, rc);
}

int xprt_rdma_init(void)
{
	int rc;

	rc = frwr_alloc_recovery_wq();
	if (rc)
		return rc;

	rc = rpcrdma_alloc_wq();
	if (rc) {
		frwr_destroy_recovery_wq();
		return rc;
	}

	rc = xprt_register_transport(&xprt_rdma);
	if (rc) {
		rpcrdma_destroy_wq();
		frwr_destroy_recovery_wq();
		return rc;
	}

	rc = xprt_register_transport(&xprt_rdma_bc);
	if (rc) {
		xprt_unregister_transport(&xprt_rdma);
		rpcrdma_destroy_wq();
		frwr_destroy_recovery_wq();
		return rc;
	}

	dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk("Defaults:\n");
	dprintk("\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk("\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}