2 * linux/net/sunrpc/xprtsock.c
4 * Client-side transport implementation for sockets.
6 * TCP callback races fixes (C) 1998 Red Hat
7 * TCP send fixes (C) 1998 Red Hat
8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11 * Rewrite of large parts of the code in order to stabilize TCP stuff.
12 * Fix behaviour when socket buffer is full.
13 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
15 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
17 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
18 * <gilles.quillard@bull.net>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/slab.h>
24 #include <linux/module.h>
25 #include <linux/capability.h>
26 #include <linux/pagemap.h>
27 #include <linux/errno.h>
28 #include <linux/socket.h>
30 #include <linux/net.h>
33 #include <linux/udp.h>
34 #include <linux/tcp.h>
35 #include <linux/sunrpc/clnt.h>
36 #include <linux/sunrpc/addr.h>
37 #include <linux/sunrpc/sched.h>
38 #include <linux/sunrpc/svcsock.h>
39 #include <linux/sunrpc/xprtsock.h>
40 #include <linux/file.h>
41 #ifdef CONFIG_SUNRPC_BACKCHANNEL
42 #include <linux/sunrpc/bc_xprt.h>
46 #include <net/checksum.h>
50 #include <trace/events/sunrpc.h>
54 static void xs_close(struct rpc_xprt *xprt);
59 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
60 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
61 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
63 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
64 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
66 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
68 #define XS_TCP_LINGER_TO (15U * HZ)
69 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
72 * We can register our own files under /proc/sys/sunrpc by
73 * calling register_sysctl_table() again. The files in that
74 * directory become the union of all files registered there.
76 * We simply need to make sure that we don't collide with
77 * someone else's file names!
80 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
81 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
82 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
83 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
84 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
86 static struct ctl_table_header *sunrpc_table_header;
89 * FIXME: changing the UDP slot table size should also resize the UDP
90 * socket buffers for existing UDP transports
92 static struct ctl_table xs_tunables_table[] = {
94 .procname = "udp_slot_table_entries",
95 .data = &xprt_udp_slot_table_entries,
96 .maxlen = sizeof(unsigned int),
98 .proc_handler = proc_dointvec_minmax,
99 .extra1 = &min_slot_table_size,
100 .extra2 = &max_slot_table_size
103 .procname = "tcp_slot_table_entries",
104 .data = &xprt_tcp_slot_table_entries,
105 .maxlen = sizeof(unsigned int),
107 .proc_handler = proc_dointvec_minmax,
108 .extra1 = &min_slot_table_size,
109 .extra2 = &max_slot_table_size
112 .procname = "tcp_max_slot_table_entries",
113 .data = &xprt_max_tcp_slot_table_entries,
114 .maxlen = sizeof(unsigned int),
116 .proc_handler = proc_dointvec_minmax,
117 .extra1 = &min_slot_table_size,
118 .extra2 = &max_tcp_slot_table_limit
121 .procname = "min_resvport",
122 .data = &xprt_min_resvport,
123 .maxlen = sizeof(unsigned int),
125 .proc_handler = proc_dointvec_minmax,
126 .extra1 = &xprt_min_resvport_limit,
127 .extra2 = &xprt_max_resvport_limit
130 .procname = "max_resvport",
131 .data = &xprt_max_resvport,
132 .maxlen = sizeof(unsigned int),
134 .proc_handler = proc_dointvec_minmax,
135 .extra1 = &xprt_min_resvport_limit,
136 .extra2 = &xprt_max_resvport_limit
139 .procname = "tcp_fin_timeout",
140 .data = &xs_tcp_fin_timeout,
141 .maxlen = sizeof(xs_tcp_fin_timeout),
143 .proc_handler = proc_dointvec_jiffies,
148 static struct ctl_table sunrpc_table[] = {
150 .procname = "sunrpc",
152 .child = xs_tunables_table
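/*
 * Illustrative sketch (not part of the original file): how a table like
 * sunrpc_table is typically hooked up through the standard sysctl API.
 * The helper names below are hypothetical; in this file the actual
 * registration happens on the module init/exit paths (elided here) and
 * stores the result in sunrpc_table_header.
 */
static inline void xs_example_register_sysctls(void)
{
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
}

static inline void xs_example_unregister_sysctls(void)
{
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
}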
160 * Wait duration for a reply from the RPC portmapper.
162 #define XS_BIND_TO (60U * HZ)
165 * Delay if a UDP socket connect error occurs. This is most likely some
166 * kind of resource problem on the local host.
168 #define XS_UDP_REEST_TO (2U * HZ)
171 * The reestablish timeout allows clients to delay for a bit before attempting
172 * to reconnect to a server that just dropped our connection.
174 * We implement an exponential backoff when trying to reestablish a TCP
175 * transport connection with the server. Some servers like to drop a TCP
176 * connection when they are overworked, so we start with a short timeout and
177 * increase over time if the server is down or not responding.
179 #define XS_TCP_INIT_REEST_TO (3U * HZ)
180 #define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
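/*
 * Worked example (illustrative): with XS_TCP_INIT_REEST_TO = 3s and
 * XS_TCP_MAX_REEST_TO = 5 minutes, successive reconnect attempts back off
 * as 3s, 6s, 12s, 24s, ... and are capped at 300s; see the doubling of
 * xprt->reestablish_timeout in xs_connect() below.
 */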
183 * TCP idle timeout; client drops the transport socket if it is idle
184 * for this long. Note that we also timeout UDP sockets to prevent
185 * holding port numbers when there is no RPC traffic.
187 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
189 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
190 # undef RPC_DEBUG_DATA
191 # define RPCDBG_FACILITY RPCDBG_TRANS
194 #ifdef RPC_DEBUG_DATA
195 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
197 u8 *buf = (u8 *) packet;
200 dprintk("RPC: %s\n", msg);
201 for (j = 0; j < count && j < 128; j += 4) {
205 dprintk("0x%04x ", j);
207 dprintk("%02x%02x%02x%02x ",
208 buf[j], buf[j+1], buf[j+2], buf[j+3]);
213 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
219 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
221 return (struct rpc_xprt *) sk->sk_user_data;
224 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
226 return (struct sockaddr *) &xprt->addr;
229 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
231 return (struct sockaddr_un *) &xprt->addr;
234 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
236 return (struct sockaddr_in *) &xprt->addr;
239 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
241 return (struct sockaddr_in6 *) &xprt->addr;
244 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
246 struct sockaddr *sap = xs_addr(xprt);
247 struct sockaddr_in6 *sin6;
248 struct sockaddr_in *sin;
249 struct sockaddr_un *sun;
252 switch (sap->sa_family) {
254 sun = xs_addr_un(xprt);
255 strlcpy(buf, sun->sun_path, sizeof(buf));
256 xprt->address_strings[RPC_DISPLAY_ADDR] =
257 kstrdup(buf, GFP_KERNEL);
260 (void)rpc_ntop(sap, buf, sizeof(buf));
261 xprt->address_strings[RPC_DISPLAY_ADDR] =
262 kstrdup(buf, GFP_KERNEL);
263 sin = xs_addr_in(xprt);
264 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
267 (void)rpc_ntop(sap, buf, sizeof(buf));
268 xprt->address_strings[RPC_DISPLAY_ADDR] =
269 kstrdup(buf, GFP_KERNEL);
270 sin6 = xs_addr_in6(xprt);
271 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
277 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
280 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
282 struct sockaddr *sap = xs_addr(xprt);
285 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
286 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
288 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
289 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
292 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
293 const char *protocol,
296 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
297 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
298 xs_format_common_peer_addresses(xprt);
299 xs_format_common_peer_ports(xprt);
302 static void xs_update_peer_port(struct rpc_xprt *xprt)
304 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
305 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
307 xs_format_common_peer_ports(xprt);
310 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
314 for (i = 0; i < RPC_DISPLAY_MAX; i++)
316 case RPC_DISPLAY_PROTO:
317 case RPC_DISPLAY_NETID:
320 kfree(xprt->address_strings[i]);
324 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
326 static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
328 struct msghdr msg = {
330 .msg_namelen = addrlen,
331 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
334 .iov_base = vec->iov_base + base,
335 .iov_len = vec->iov_len - base,
338 if (iov.iov_len != 0)
339 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
340 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
343 static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
345 ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
346 int offset, size_t size, int flags);
348 unsigned int remainder;
351 remainder = xdr->page_len - base;
352 base += xdr->page_base;
353 ppage = xdr->pages + (base >> PAGE_SHIFT);
355 do_sendpage = sock->ops->sendpage;
357 do_sendpage = sock_no_sendpage;
359 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
360 int flags = XS_SENDMSG_FLAGS;
366 flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
367 err = do_sendpage(sock, *ppage, base, len, flags);
368 if (remainder == 0 || err != len)
382 * xs_sendpages - write pages directly to a socket
383 * @sock: socket to send on
384 * @addr: UDP only -- address of destination
385 * @addrlen: UDP only -- length of destination address
386 * @xdr: buffer containing this request
387 * @base: starting position in the buffer
388 * @zerocopy: true if it is safe to use sendpage()
389 * @sent_p: return the total number of bytes successfully queued for sending
392 static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
394 unsigned int remainder = xdr->len - base;
401 clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
407 if (base < xdr->head[0].iov_len || addr != NULL) {
408 unsigned int len = xdr->head[0].iov_len - base;
410 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
411 if (remainder == 0 || err != len)
416 base -= xdr->head[0].iov_len;
418 if (base < xdr->page_len) {
419 unsigned int len = xdr->page_len - base;
421 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
423 if (remainder == 0 || sent != len)
427 base -= xdr->page_len;
429 if (base >= xdr->tail[0].iov_len)
431 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
440 static void xs_nospace_callback(struct rpc_task *task)
442 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
444 transport->inet->sk_write_pending--;
445 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
449 * xs_nospace - place task on wait queue if transmit was incomplete
450 * @task: task to put to sleep
453 static int xs_nospace(struct rpc_task *task)
455 struct rpc_rqst *req = task->tk_rqstp;
456 struct rpc_xprt *xprt = req->rq_xprt;
457 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
458 struct sock *sk = transport->inet;
461 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
462 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
465 /* Protect against races with write_space */
466 spin_lock_bh(&xprt->transport_lock);
468 /* Don't race with disconnect */
469 if (xprt_connected(xprt)) {
470 if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
472 * Notify TCP that we're limited by the application
475 set_bit(SOCK_NOSPACE, &transport->sock->flags);
476 sk->sk_write_pending++;
477 /* ...and wait for more buffer space */
478 xprt_wait_for_buffer_space(task, xs_nospace_callback);
481 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
485 spin_unlock_bh(&xprt->transport_lock);
487 /* Race breaker in case memory is freed before above code is called */
488 sk->sk_write_space(sk);
493 * Construct a stream transport record marker in @buf.
495 static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
497 u32 reclen = buf->len - sizeof(rpc_fraghdr);
498 rpc_fraghdr *base = buf->head[0].iov_base;
499 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
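/*
 * Example (illustrative): an XDR buffer of 104 bytes, of which the first
 * 4 bytes are reserved for the marker itself, yields reclen = 100 and a
 * marker of cpu_to_be32(0x80000064): the top bit is
 * RPC_LAST_STREAM_FRAGMENT, the low 31 bits are the fragment length.
 */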
503 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
504 * @task: RPC task that manages the state of an RPC request
507 * 0: The request has been sent
508 * EAGAIN: The socket was blocked, please call again later to
509 * complete the request
510 * ENOTCONN: Caller needs to invoke connect logic then call again
511 * other: Some other error occurred, the request was not sent
513 static int xs_local_send_request(struct rpc_task *task)
515 struct rpc_rqst *req = task->tk_rqstp;
516 struct rpc_xprt *xprt = req->rq_xprt;
517 struct sock_xprt *transport =
518 container_of(xprt, struct sock_xprt, xprt);
519 struct xdr_buf *xdr = &req->rq_snd_buf;
523 xs_encode_stream_record_marker(&req->rq_snd_buf);
525 xs_pktdump("packet data:",
526 req->rq_svec->iov_base, req->rq_svec->iov_len);
528 status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
530 dprintk("RPC: %s(%u) = %d\n",
531 __func__, xdr->len - req->rq_bytes_sent, status);
533 if (status == -EAGAIN && sock_writeable(transport->inet))
536 if (likely(sent > 0) || status == 0) {
537 req->rq_bytes_sent += sent;
538 req->rq_xmit_bytes_sent += sent;
539 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
540 req->rq_bytes_sent = 0;
550 status = xs_nospace(task);
553 dprintk("RPC: sendmsg returned unrecognized error %d\n",
564 * xs_udp_send_request - write an RPC request to a UDP socket
565 * @task: address of RPC task that manages the state of an RPC request
568 * 0: The request has been sent
569 * EAGAIN: The socket was blocked, please call again later to
570 * complete the request
571 * ENOTCONN: Caller needs to invoke connect logic then call again
572 * other: Some other error occurred, the request was not sent
574 static int xs_udp_send_request(struct rpc_task *task)
576 struct rpc_rqst *req = task->tk_rqstp;
577 struct rpc_xprt *xprt = req->rq_xprt;
578 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
579 struct xdr_buf *xdr = &req->rq_snd_buf;
583 xs_pktdump("packet data:",
584 req->rq_svec->iov_base,
585 req->rq_svec->iov_len);
587 if (!xprt_bound(xprt))
589 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
590 xdr, req->rq_bytes_sent, true, &sent);
592 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
593 xdr->len - req->rq_bytes_sent, status);
595 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
596 if (status == -EPERM)
599 if (status == -EAGAIN && sock_writeable(transport->inet))
602 if (sent > 0 || status == 0) {
603 req->rq_xmit_bytes_sent += sent;
604 if (sent >= req->rq_slen)
606 /* Still some bytes left; set up for a retry later. */
614 /* Should we call xs_close() here? */
617 status = xs_nospace(task);
620 dprintk("RPC: sendmsg returned unrecognized error %d\n",
627 /* When the server has died, an ICMP port unreachable message
628 * prompts ECONNREFUSED. */
629 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
636 * xs_tcp_send_request - write an RPC request to a TCP socket
637 * @task: address of RPC task that manages the state of an RPC request
640 * 0: The request has been sent
641 * EAGAIN: The socket was blocked, please call again later to
642 * complete the request
643 * ENOTCONN: Caller needs to invoke connect logic then call again
644 * other: Some other error occurred, the request was not sent
646 * XXX: In the case of soft timeouts, should we eventually give up
647 * if sendmsg is not able to make progress?
649 static int xs_tcp_send_request(struct rpc_task *task)
651 struct rpc_rqst *req = task->tk_rqstp;
652 struct rpc_xprt *xprt = req->rq_xprt;
653 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
654 struct xdr_buf *xdr = &req->rq_snd_buf;
655 bool zerocopy = true;
659 xs_encode_stream_record_marker(&req->rq_snd_buf);
661 xs_pktdump("packet data:",
662 req->rq_svec->iov_base,
663 req->rq_svec->iov_len);
664 /* Don't use zero copy if this is a resend. If the RPC call
665 * completes while the socket holds a reference to the pages,
666 * then we may end up resending corrupted data.
668 if (task->tk_flags & RPC_TASK_SENT)
671 /* Continue transmitting the packet/record. We must be careful
672 * to cope with writespace callbacks arriving _after_ we have
673 * called sendmsg(). */
676 status = xs_sendpages(transport->sock, NULL, 0, xdr,
677 req->rq_bytes_sent, zerocopy, &sent);
679 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
680 xdr->len - req->rq_bytes_sent, status);
682 /* If we've sent the entire packet, immediately
683 * reset the count of bytes sent. */
684 req->rq_bytes_sent += sent;
685 req->rq_xmit_bytes_sent += sent;
686 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
687 req->rq_bytes_sent = 0;
698 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
704 /* Should we call xs_close() here? */
707 status = xs_nospace(task);
710 dprintk("RPC: sendmsg returned unrecognized error %d\n",
718 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
725 * xs_tcp_release_xprt - clean up after a tcp transmission
729 * This cleans up if an error causes us to abort the transmission of a request.
730 * In this case, the socket may need to be reset in order to avoid confusing the server.
733 static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
735 struct rpc_rqst *req;
737 if (task != xprt->snd_task)
741 req = task->tk_rqstp;
744 if (req->rq_bytes_sent == 0)
746 if (req->rq_bytes_sent == req->rq_snd_buf.len)
748 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
750 xprt_release_xprt(xprt, task);
753 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
755 transport->old_data_ready = sk->sk_data_ready;
756 transport->old_state_change = sk->sk_state_change;
757 transport->old_write_space = sk->sk_write_space;
758 transport->old_error_report = sk->sk_error_report;
761 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
763 sk->sk_data_ready = transport->old_data_ready;
764 sk->sk_state_change = transport->old_state_change;
765 sk->sk_write_space = transport->old_write_space;
766 sk->sk_error_report = transport->old_error_report;
769 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
771 smp_mb__before_atomic();
772 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
773 clear_bit(XPRT_CLOSING, &xprt->state);
774 smp_mb__after_atomic();
777 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
779 xs_sock_reset_connection_flags(xprt);
780 /* Mark transport as closed and wake up all pending tasks */
781 xprt_disconnect_done(xprt);
785 * xs_error_report - callback to handle TCP socket state errors
788 * Note: we don't call sock_error() since there may be a rpc_task
789 * using the socket, and so we don't want to clear sk->sk_err.
791 static void xs_error_report(struct sock *sk)
793 struct rpc_xprt *xprt;
796 read_lock_bh(&sk->sk_callback_lock);
797 if (!(xprt = xprt_from_sock(sk)))
803 /* Is this a reset event? */
804 if (sk->sk_state == TCP_CLOSE)
805 xs_sock_mark_closed(xprt);
806 dprintk("RPC: xs_error_report client %p, error=%d...\n",
808 trace_rpc_socket_error(xprt, sk->sk_socket, err);
809 xprt_wake_pending_tasks(xprt, err);
811 read_unlock_bh(&sk->sk_callback_lock);
814 static void xs_reset_transport(struct sock_xprt *transport)
816 struct socket *sock = transport->sock;
817 struct sock *sk = transport->inet;
818 struct rpc_xprt *xprt = &transport->xprt;
823 if (atomic_read(&transport->xprt.swapper))
824 sk_clear_memalloc(sk);
826 kernel_sock_shutdown(sock, SHUT_RDWR);
828 mutex_lock(&transport->recv_mutex);
829 write_lock_bh(&sk->sk_callback_lock);
830 transport->inet = NULL;
831 transport->sock = NULL;
833 sk->sk_user_data = NULL;
835 xs_restore_old_callbacks(transport, sk);
836 xprt_clear_connected(xprt);
837 write_unlock_bh(&sk->sk_callback_lock);
838 xs_sock_reset_connection_flags(xprt);
839 mutex_unlock(&transport->recv_mutex);
841 trace_rpc_socket_close(xprt, sock);
846 * xs_close - close a socket
849 * This is used when all requests are complete; i.e., no DRC state remains
850 * on the server we want to save.
852 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
853 * xs_reset_transport() zeroing the socket from underneath a writer.
855 static void xs_close(struct rpc_xprt *xprt)
857 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
859 dprintk("RPC: xs_close xprt %p\n", xprt);
861 xs_reset_transport(transport);
862 xprt->reestablish_timeout = 0;
864 xprt_disconnect_done(xprt);
867 static void xs_inject_disconnect(struct rpc_xprt *xprt)
869 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
871 xprt_disconnect_done(xprt);
874 static void xs_xprt_free(struct rpc_xprt *xprt)
876 xs_free_peer_addresses(xprt);
881 * xs_destroy - prepare to shutdown a transport
882 * @xprt: doomed transport
885 static void xs_destroy(struct rpc_xprt *xprt)
887 struct sock_xprt *transport = container_of(xprt,
888 struct sock_xprt, xprt);
889 dprintk("RPC: xs_destroy xprt %p\n", xprt);
891 cancel_delayed_work_sync(&transport->connect_worker);
893 cancel_work_sync(&transport->recv_worker);
895 module_put(THIS_MODULE);
898 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
900 struct xdr_skb_reader desc = {
902 .offset = sizeof(rpc_fraghdr),
903 .count = skb->len - sizeof(rpc_fraghdr),
906 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
914 * xs_local_data_read_skb
919 * Currently this assumes we can read the whole reply in a single gulp.
921 static void xs_local_data_read_skb(struct rpc_xprt *xprt,
925 struct rpc_task *task;
926 struct rpc_rqst *rovr;
931 repsize = skb->len - sizeof(rpc_fraghdr);
933 dprintk("RPC: impossible RPC reply size %d\n", repsize);
937 /* Copy the XID from the skb... */
938 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
942 /* Look up and lock the request corresponding to the given XID */
943 spin_lock_bh(&xprt->transport_lock);
944 rovr = xprt_lookup_rqst(xprt, *xp);
947 task = rovr->rq_task;
949 copied = rovr->rq_private_buf.buflen;
950 if (copied > repsize)
953 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
954 dprintk("RPC: sk_buff copy failed\n");
958 xprt_complete_rqst(task, copied);
961 spin_unlock_bh(&xprt->transport_lock);
964 static void xs_local_data_receive(struct sock_xprt *transport)
970 mutex_lock(&transport->recv_mutex);
971 sk = transport->inet;
975 skb = skb_recv_datagram(sk, 0, 1, &err);
978 xs_local_data_read_skb(&transport->xprt, sk, skb);
979 skb_free_datagram(sk, skb);
982 mutex_unlock(&transport->recv_mutex);
985 static void xs_local_data_receive_workfn(struct work_struct *work)
987 struct sock_xprt *transport =
988 container_of(work, struct sock_xprt, recv_worker);
989 xs_local_data_receive(transport);
993 * xs_udp_data_read_skb - receive callback for UDP sockets
999 static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1001 struct sk_buff *skb)
1003 struct rpc_task *task;
1004 struct rpc_rqst *rovr;
1005 int repsize, copied;
1009 repsize = skb->len - sizeof(struct udphdr);
1011 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1015 /* Copy the XID from the skb... */
1016 xp = skb_header_pointer(skb, sizeof(struct udphdr),
1017 sizeof(_xid), &_xid);
1021 /* Look up and lock the request corresponding to the given XID */
1022 spin_lock_bh(&xprt->transport_lock);
1023 rovr = xprt_lookup_rqst(xprt, *xp);
1026 task = rovr->rq_task;
1028 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1031 /* Suck it into the iovec, verify checksum if not done by hw. */
1032 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1033 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
1037 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1039 xprt_adjust_cwnd(xprt, task, copied);
1040 xprt_complete_rqst(task, copied);
1043 spin_unlock_bh(&xprt->transport_lock);
1046 static void xs_udp_data_receive(struct sock_xprt *transport)
1048 struct sk_buff *skb;
1052 mutex_lock(&transport->recv_mutex);
1053 sk = transport->inet;
1057 skb = skb_recv_datagram(sk, 0, 1, &err);
1060 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1061 skb_free_datagram(sk, skb);
1064 mutex_unlock(&transport->recv_mutex);
1067 static void xs_udp_data_receive_workfn(struct work_struct *work)
1069 struct sock_xprt *transport =
1070 container_of(work, struct sock_xprt, recv_worker);
1071 xs_udp_data_receive(transport);
1075 * xs_data_ready - "data ready" callback for UDP sockets
1076 * @sk: socket with data to read
1079 static void xs_data_ready(struct sock *sk)
1081 struct rpc_xprt *xprt;
1083 read_lock_bh(&sk->sk_callback_lock);
1084 dprintk("RPC: xs_data_ready...\n");
1085 xprt = xprt_from_sock(sk);
1087 struct sock_xprt *transport = container_of(xprt,
1088 struct sock_xprt, xprt);
1089 queue_work(rpciod_workqueue, &transport->recv_worker);
1091 read_unlock_bh(&sk->sk_callback_lock);
1095 * Helper function to force a TCP close if the server is sending
1096 * junk and/or it has put us in CLOSE_WAIT
1098 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1100 xprt_force_disconnect(xprt);
1103 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1105 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1109 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1110 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1111 used = xdr_skb_read_bits(desc, p, len);
1112 transport->tcp_offset += used;
1116 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1117 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1118 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1120 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1121 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1123 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1124 transport->tcp_offset = 0;
1126 /* Sanity check of the record length */
1127 if (unlikely(transport->tcp_reclen < 8)) {
1128 dprintk("RPC: invalid TCP record fragment length\n");
1129 xs_tcp_force_close(xprt);
1132 dprintk("RPC: reading TCP record fragment of length %d\n",
1133 transport->tcp_reclen);
1136 static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1138 if (transport->tcp_offset == transport->tcp_reclen) {
1139 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1140 transport->tcp_offset = 0;
1141 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1142 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1143 transport->tcp_flags |= TCP_RCV_COPY_XID;
1144 transport->tcp_copied = 0;
1149 static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1154 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1155 dprintk("RPC: reading XID (%zu bytes)\n", len);
1156 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1157 used = xdr_skb_read_bits(desc, p, len);
1158 transport->tcp_offset += used;
1161 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1162 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1163 transport->tcp_copied = 4;
1164 dprintk("RPC: reading %s XID %08x\n",
1165 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1167 ntohl(transport->tcp_xid));
1168 xs_tcp_check_fraghdr(transport);
1171 static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1172 struct xdr_skb_reader *desc)
1179 * We want transport->tcp_offset to be 8 at the end of this routine
1180 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1181 * When this function is called for the first time,
1182 * transport->tcp_offset is 4 (after having already read the xid).
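	/*
	 * Illustration (assumed values): on the first call tcp_offset is 4,
	 * so offset = 0 and len = 4, and the whole 4-byte call direction can
	 * be read in one go; if only 2 bytes were available in the skb, a
	 * later call sees tcp_offset = 6, offset = 2, len = 2 and picks up
	 * the remainder.
	 */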
1184 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1185 len = sizeof(transport->tcp_calldir) - offset;
1186 dprintk("RPC: reading CALL/REPLY flag (%zu bytes)\n", len);
1187 p = ((char *) &transport->tcp_calldir) + offset;
1188 used = xdr_skb_read_bits(desc, p, len);
1189 transport->tcp_offset += used;
1192 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1194 * We don't yet have the XDR buffer, so we will write the calldir
1195 * out after we get the buffer from the 'struct rpc_rqst'
1197 switch (ntohl(transport->tcp_calldir)) {
1199 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1200 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1201 transport->tcp_flags |= TCP_RPC_REPLY;
1204 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1205 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1206 transport->tcp_flags &= ~TCP_RPC_REPLY;
1209 dprintk("RPC: invalid request message type\n");
1210 xs_tcp_force_close(&transport->xprt);
1212 xs_tcp_check_fraghdr(transport);
1215 static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1216 struct xdr_skb_reader *desc,
1217 struct rpc_rqst *req)
1219 struct sock_xprt *transport =
1220 container_of(xprt, struct sock_xprt, xprt);
1221 struct xdr_buf *rcvbuf;
1225 rcvbuf = &req->rq_private_buf;
1227 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1229 * Save the RPC direction in the XDR buffer
1231 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1232 &transport->tcp_calldir,
1233 sizeof(transport->tcp_calldir));
1234 transport->tcp_copied += sizeof(transport->tcp_calldir);
1235 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1239 if (len > transport->tcp_reclen - transport->tcp_offset) {
1240 struct xdr_skb_reader my_desc;
1242 len = transport->tcp_reclen - transport->tcp_offset;
1243 memcpy(&my_desc, desc, sizeof(my_desc));
1244 my_desc.count = len;
1245 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1246 &my_desc, xdr_skb_read_bits);
1250 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1251 desc, xdr_skb_read_bits);
1254 transport->tcp_copied += r;
1255 transport->tcp_offset += r;
1258 /* Error when copying to the receive buffer,
1259 * usually because we weren't able to allocate
1260 * additional buffer pages. All we can do now
1261 * is turn off TCP_RCV_COPY_DATA, so the request
1262 * will not receive any additional updates, and time out.
1264 * Any remaining data from this record will be discarded.
1267 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1268 dprintk("RPC: XID %08x truncated request\n",
1269 ntohl(transport->tcp_xid));
1270 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1271 "tcp_offset = %u, tcp_reclen = %u\n",
1272 xprt, transport->tcp_copied,
1273 transport->tcp_offset, transport->tcp_reclen);
1277 dprintk("RPC: XID %08x read %zd bytes\n",
1278 ntohl(transport->tcp_xid), r);
1279 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1280 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1281 transport->tcp_offset, transport->tcp_reclen);
1283 if (transport->tcp_copied == req->rq_private_buf.buflen)
1284 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1285 else if (transport->tcp_offset == transport->tcp_reclen) {
1286 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1287 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1292 * Finds the request corresponding to the RPC xid and invokes the common
1293 * tcp read code to read the data.
1295 static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1296 struct xdr_skb_reader *desc)
1298 struct sock_xprt *transport =
1299 container_of(xprt, struct sock_xprt, xprt);
1300 struct rpc_rqst *req;
1302 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1304 /* Find and lock the request corresponding to this xid */
1305 spin_lock_bh(&xprt->transport_lock);
1306 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1308 dprintk("RPC: XID %08x request not found!\n",
1309 ntohl(transport->tcp_xid));
1310 spin_unlock_bh(&xprt->transport_lock);
1314 xs_tcp_read_common(xprt, desc, req);
1316 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1317 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1319 spin_unlock_bh(&xprt->transport_lock);
1323 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1325 * Obtains an rpc_rqst previously allocated and invokes the common
1326 * tcp read code to read the data. The result is placed in the callback queue.
1328 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1329 * connection and return -1.
1331 static int xs_tcp_read_callback(struct rpc_xprt *xprt,
1332 struct xdr_skb_reader *desc)
1334 struct sock_xprt *transport =
1335 container_of(xprt, struct sock_xprt, xprt);
1336 struct rpc_rqst *req;
1338 /* Look up and lock the request corresponding to the given XID */
1339 spin_lock_bh(&xprt->transport_lock);
1340 req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
1342 spin_unlock_bh(&xprt->transport_lock);
1343 printk(KERN_WARNING "Callback slot table overflowed\n");
1344 xprt_force_disconnect(xprt);
1348 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1349 xs_tcp_read_common(xprt, desc, req);
1351 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1352 xprt_complete_bc_request(req, transport->tcp_copied);
1353 spin_unlock_bh(&xprt->transport_lock);
1358 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1359 struct xdr_skb_reader *desc)
1361 struct sock_xprt *transport =
1362 container_of(xprt, struct sock_xprt, xprt);
1364 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1365 xs_tcp_read_reply(xprt, desc) :
1366 xs_tcp_read_callback(xprt, desc);
1369 static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
1373 ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
1374 SVC_SOCK_ANONYMOUS);
1380 static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1381 struct xdr_skb_reader *desc)
1383 return xs_tcp_read_reply(xprt, desc);
1385 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1388 * Read data off the transport. This can be either an RPC_CALL or an
1389 * RPC_REPLY. Relay the processing to helper functions.
1391 static void xs_tcp_read_data(struct rpc_xprt *xprt,
1392 struct xdr_skb_reader *desc)
1394 struct sock_xprt *transport =
1395 container_of(xprt, struct sock_xprt, xprt);
1397 if (_xs_tcp_read_data(xprt, desc) == 0)
1398 xs_tcp_check_fraghdr(transport);
1401 * The transport_lock protects the request handling.
1402 * There's no need to hold it to update the tcp_flags.
1404 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1408 static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1412 len = transport->tcp_reclen - transport->tcp_offset;
1413 if (len > desc->count)
1416 desc->offset += len;
1417 transport->tcp_offset += len;
1418 dprintk("RPC: discarded %zu bytes\n", len);
1419 xs_tcp_check_fraghdr(transport);
1422 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1424 struct rpc_xprt *xprt = rd_desc->arg.data;
1425 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1426 struct xdr_skb_reader desc = {
1432 dprintk("RPC: xs_tcp_data_recv started\n");
1434 trace_xs_tcp_data_recv(transport);
1435 /* Read in a new fragment marker if necessary */
1436 /* Can we ever really expect to get completely empty fragments? */
1437 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1438 xs_tcp_read_fraghdr(xprt, &desc);
1441 /* Read in the xid if necessary */
1442 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1443 xs_tcp_read_xid(transport, &desc);
1446 /* Read in the call/reply flag */
1447 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1448 xs_tcp_read_calldir(transport, &desc);
1451 /* Read in the request data */
1452 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1453 xs_tcp_read_data(xprt, &desc);
1456 /* Skip over any trailing bytes on short reads */
1457 xs_tcp_read_discard(transport, &desc);
1458 } while (desc.count);
1459 trace_xs_tcp_data_recv(transport);
1460 dprintk("RPC: xs_tcp_data_recv done\n");
1461 return len - desc.count;
1464 static void xs_tcp_data_receive(struct sock_xprt *transport)
1466 struct rpc_xprt *xprt = &transport->xprt;
1468 read_descriptor_t rd_desc = {
1469 .count = 2*1024*1024,
1472 unsigned long total = 0;
1475 mutex_lock(&transport->recv_mutex);
1476 sk = transport->inet;
1480 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1483 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1488 rd_desc.count = 65536;
1491 mutex_unlock(&transport->recv_mutex);
1492 trace_xs_tcp_data_ready(xprt, read, total);
1495 static void xs_tcp_data_receive_workfn(struct work_struct *work)
1497 struct sock_xprt *transport =
1498 container_of(work, struct sock_xprt, recv_worker);
1499 xs_tcp_data_receive(transport);
1503 * xs_tcp_data_ready - "data ready" callback for TCP sockets
1504 * @sk: socket with data to read
1507 static void xs_tcp_data_ready(struct sock *sk)
1509 struct sock_xprt *transport;
1510 struct rpc_xprt *xprt;
1512 dprintk("RPC: xs_tcp_data_ready...\n");
1514 read_lock_bh(&sk->sk_callback_lock);
1515 if (!(xprt = xprt_from_sock(sk)))
1517 transport = container_of(xprt, struct sock_xprt, xprt);
1519 /* Any data means we had a useful conversation, so
1520 * we don't need to delay the next reconnect
1522 if (xprt->reestablish_timeout)
1523 xprt->reestablish_timeout = 0;
1524 queue_work(rpciod_workqueue, &transport->recv_worker);
1527 read_unlock_bh(&sk->sk_callback_lock);
1531 * xs_tcp_state_change - callback to handle TCP socket state changes
1532 * @sk: socket whose state has changed
1535 static void xs_tcp_state_change(struct sock *sk)
1537 struct rpc_xprt *xprt;
1538 struct sock_xprt *transport;
1540 read_lock_bh(&sk->sk_callback_lock);
1541 if (!(xprt = xprt_from_sock(sk)))
1543 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1544 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1545 sk->sk_state, xprt_connected(xprt),
1546 sock_flag(sk, SOCK_DEAD),
1547 sock_flag(sk, SOCK_ZAPPED),
1550 transport = container_of(xprt, struct sock_xprt, xprt);
1551 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1552 switch (sk->sk_state) {
1553 case TCP_ESTABLISHED:
1554 spin_lock(&xprt->transport_lock);
1555 if (!xprt_test_and_set_connected(xprt)) {
1557 /* Reset TCP record info */
1558 transport->tcp_offset = 0;
1559 transport->tcp_reclen = 0;
1560 transport->tcp_copied = 0;
1561 transport->tcp_flags =
1562 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1563 xprt->connect_cookie++;
1564 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1565 xprt_clear_connecting(xprt);
1567 xprt_wake_pending_tasks(xprt, -EAGAIN);
1569 spin_unlock(&xprt->transport_lock);
1572 /* The client initiated a shutdown of the socket */
1573 xprt->connect_cookie++;
1574 xprt->reestablish_timeout = 0;
1575 set_bit(XPRT_CLOSING, &xprt->state);
1576 smp_mb__before_atomic();
1577 clear_bit(XPRT_CONNECTED, &xprt->state);
1578 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1579 smp_mb__after_atomic();
1581 case TCP_CLOSE_WAIT:
1582 /* The server initiated a shutdown of the socket */
1583 xprt->connect_cookie++;
1584 clear_bit(XPRT_CONNECTED, &xprt->state);
1585 xs_tcp_force_close(xprt);
1588 * If the server closed down the connection, make sure that
1589 * we back off before reconnecting
1591 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1592 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1595 set_bit(XPRT_CLOSING, &xprt->state);
1596 smp_mb__before_atomic();
1597 clear_bit(XPRT_CONNECTED, &xprt->state);
1598 smp_mb__after_atomic();
1601 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1602 &transport->sock_state))
1603 xprt_clear_connecting(xprt);
1604 xs_sock_mark_closed(xprt);
1607 read_unlock_bh(&sk->sk_callback_lock);
1610 static void xs_write_space(struct sock *sk)
1612 struct socket *sock;
1613 struct rpc_xprt *xprt;
1615 if (unlikely(!(sock = sk->sk_socket)))
1617 clear_bit(SOCK_NOSPACE, &sock->flags);
1619 if (unlikely(!(xprt = xprt_from_sock(sk))))
1621 if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
1624 xprt_write_space(xprt);
1628 * xs_udp_write_space - callback invoked when socket buffer space becomes available
1630 * @sk: socket whose state has changed
1632 * Called when more output buffer space is available for this socket.
1633 * We try not to wake our writers until they can make "significant"
1634 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1635 * with a bunch of small requests.
1637 static void xs_udp_write_space(struct sock *sk)
1639 read_lock_bh(&sk->sk_callback_lock);
1641 /* from net/core/sock.c:sock_def_write_space */
1642 if (sock_writeable(sk))
1645 read_unlock_bh(&sk->sk_callback_lock);
1649 * xs_tcp_write_space - callback invoked when socket buffer space becomes available
1651 * @sk: socket whose state has changed
1653 * Called when more output buffer space is available for this socket.
1654 * We try not to wake our writers until they can make "significant"
1655 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1656 * with a bunch of small requests.
1658 static void xs_tcp_write_space(struct sock *sk)
1660 read_lock_bh(&sk->sk_callback_lock);
1662 /* from net/core/stream.c:sk_stream_write_space */
1663 if (sk_stream_is_writeable(sk))
1666 read_unlock_bh(&sk->sk_callback_lock);
1669 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1671 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1672 struct sock *sk = transport->inet;
1674 if (transport->rcvsize) {
1675 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1676 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1678 if (transport->sndsize) {
1679 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1680 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1681 sk->sk_write_space(sk);
1686 * xs_udp_set_buffer_size - set send and receive limits
1687 * @xprt: generic transport
1688 * @sndsize: requested size of send buffer, in bytes
1689 * @rcvsize: requested size of receive buffer, in bytes
1691 * Set socket send and receive buffer size limits.
1693 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1695 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1697 transport->sndsize = 0;
1699 transport->sndsize = sndsize + 1024;
1700 transport->rcvsize = 0;
1702 transport->rcvsize = rcvsize + 1024;
1704 xs_udp_do_set_buffer_size(xprt);
1708 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1709 * @task: task that timed out
1711 * Adjust the congestion window after a retransmit timeout has occurred.
1713 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1715 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1718 static unsigned short xs_get_random_port(void)
1720 unsigned short range = xprt_max_resvport - xprt_min_resvport;
1721 unsigned short rand = (unsigned short) prandom_u32() % range;
1722 return rand + xprt_min_resvport;
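/*
 * Example (assuming the default reserved-port range of 665..1023):
 * range = 1023 - 665 = 358, so the port returned above always falls
 * within 665..1022.
 */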
1726 * xs_sock_set_reuseport - set the socket's port and address reuse options
1729 * Note that this function has to be called on all sockets that share the
1730 * same port, and it must be called before binding.
1732 static void xs_sock_set_reuseport(struct socket *sock)
1736 kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1737 (char *)&opt, sizeof(opt));
1740 static unsigned short xs_sock_getport(struct socket *sock)
1742 struct sockaddr_storage buf;
1744 unsigned short port = 0;
1746 if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
1748 switch (buf.ss_family) {
1750 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1753 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1760 * xs_set_port - reset the port number in the remote endpoint address
1761 * @xprt: generic transport
1762 * @port: new port number
1765 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1767 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1769 rpc_set_port(xs_addr(xprt), port);
1770 xs_update_peer_port(xprt);
1773 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1775 if (transport->srcport == 0)
1776 transport->srcport = xs_sock_getport(sock);
1779 static unsigned short xs_get_srcport(struct sock_xprt *transport)
1781 unsigned short port = transport->srcport;
1783 if (port == 0 && transport->xprt.resvport)
1784 port = xs_get_random_port();
1788 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1790 if (transport->srcport != 0)
1791 transport->srcport = 0;
1792 if (!transport->xprt.resvport)
1794 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1795 return xprt_max_resvport;
1798 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1800 struct sockaddr_storage myaddr;
1802 unsigned short port = xs_get_srcport(transport);
1803 unsigned short last;
1806 * If we are asking for any ephemeral port (i.e. port == 0 &&
1807 * transport->xprt.resvport == 0), don't bind. Let the local
1808 * port selection happen implicitly when the socket is used
1809 * (for example at connect time).
1811 * This ensures that we can continue to establish TCP
1812 * connections even when all local ephemeral ports are already
1813 * a part of some TCP connection. This makes no difference
1814 * for UDP sockets, but also doesn't harm them.
1816 * If we're asking for any reserved port (i.e. port == 0 &&
1817 * transport->xprt.resvport == 1) xs_get_srcport above will
1818 * ensure that port is non-zero and we will bind as needed.
1823 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1825 rpc_set_port((struct sockaddr *)&myaddr, port);
1826 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1827 transport->xprt.addrlen);
1829 transport->srcport = port;
1833 port = xs_next_srcport(transport, port);
1836 } while (err == -EADDRINUSE && nloop != 2);
1838 if (myaddr.ss_family == AF_INET)
1839 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1840 &((struct sockaddr_in *)&myaddr)->sin_addr,
1841 port, err ? "failed" : "ok", err);
1843 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1844 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1845 port, err ? "failed" : "ok", err);
1850 * We don't support autobind on AF_LOCAL sockets
1852 static void xs_local_rpcbind(struct rpc_task *task)
1855 xprt_set_bound(rcu_dereference(task->tk_client->cl_xprt));
1859 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1863 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1864 static struct lock_class_key xs_key[2];
1865 static struct lock_class_key xs_slock_key[2];
1867 static inline void xs_reclassify_socketu(struct socket *sock)
1869 struct sock *sk = sock->sk;
1871 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1872 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1875 static inline void xs_reclassify_socket4(struct socket *sock)
1877 struct sock *sk = sock->sk;
1879 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1880 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1883 static inline void xs_reclassify_socket6(struct socket *sock)
1885 struct sock *sk = sock->sk;
1887 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1888 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1891 static inline void xs_reclassify_socket(int family, struct socket *sock)
1893 WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1894 if (sock_owned_by_user(sock->sk))
1899 xs_reclassify_socketu(sock);
1902 xs_reclassify_socket4(sock);
1905 xs_reclassify_socket6(sock);
1910 static inline void xs_reclassify_socket(int family, struct socket *sock)
1915 static void xs_dummy_setup_socket(struct work_struct *work)
1919 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1920 struct sock_xprt *transport, int family, int type,
1921 int protocol, bool reuseport)
1923 struct socket *sock;
1926 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1928 dprintk("RPC: can't create %d transport socket (%d).\n",
1932 xs_reclassify_socket(family, sock);
1935 xs_sock_set_reuseport(sock);
1937 err = xs_bind(transport, sock);
1945 return ERR_PTR(err);
1948 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1949 struct socket *sock)
1951 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1954 if (!transport->inet) {
1955 struct sock *sk = sock->sk;
1957 write_lock_bh(&sk->sk_callback_lock);
1959 xs_save_old_callbacks(transport, sk);
1961 sk->sk_user_data = xprt;
1962 sk->sk_data_ready = xs_data_ready;
1963 sk->sk_write_space = xs_udp_write_space;
1964 sk->sk_error_report = xs_error_report;
1965 sk->sk_allocation = GFP_NOIO;
1967 xprt_clear_connected(xprt);
1969 /* Reset to new socket */
1970 transport->sock = sock;
1971 transport->inet = sk;
1973 write_unlock_bh(&sk->sk_callback_lock);
1976 /* Tell the socket layer to start connecting... */
1977 xprt->stat.connect_count++;
1978 xprt->stat.connect_start = jiffies;
1979 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1983 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1984 * @transport: socket transport to connect
1986 static int xs_local_setup_socket(struct sock_xprt *transport)
1988 struct rpc_xprt *xprt = &transport->xprt;
1989 struct socket *sock;
1992 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1993 SOCK_STREAM, 0, &sock, 1);
1995 dprintk("RPC: can't create AF_LOCAL "
1996 "transport socket (%d).\n", -status);
1999 xs_reclassify_socket(AF_LOCAL, sock);
2001 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
2002 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2004 status = xs_local_finish_connecting(xprt, sock);
2005 trace_rpc_socket_connect(xprt, sock, status);
2008 dprintk("RPC: xprt %p connected to %s\n",
2009 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2010 xprt_set_connected(xprt);
2014 dprintk("RPC: xprt %p: socket %s does not exist\n",
2015 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2018 dprintk("RPC: xprt %p: connection refused for %s\n",
2019 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2022 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
2024 xprt->address_strings[RPC_DISPLAY_ADDR]);
2028 xprt_clear_connecting(xprt);
2029 xprt_wake_pending_tasks(xprt, status);
2033 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2035 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2038 if (RPC_IS_ASYNC(task)) {
2040 * We want the AF_LOCAL connect to be resolved in the
2041 * filesystem namespace of the process making the rpc
2042 * call. Thus we connect synchronously.
2044 * If we want to support asynchronous AF_LOCAL calls,
2045 * we'll need to figure out how to pass a namespace to connect.
2048 rpc_exit(task, -ENOTCONN);
2051 ret = xs_local_setup_socket(transport);
2052 if (ret && !RPC_IS_SOFTCONN(task))
2053 msleep_interruptible(15000);
2056 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2058 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
2059 * know that we have exclusive access to the socket), to guard against
2060 * races with xs_reset_transport.
2062 static void xs_set_memalloc(struct rpc_xprt *xprt)
2064 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2068 * If there's no sock, then we have nothing to set. The
2069 * reconnecting process will get it for us.
2071 if (!transport->inet)
2073 if (atomic_read(&xprt->swapper))
2074 sk_set_memalloc(transport->inet);
2078 * xs_enable_swap - Tag this transport as being used for swap.
2079 * @xprt: transport to tag
2081 * Take a reference to this transport on behalf of the rpc_clnt, and
2082 * optionally mark it for swapping if it wasn't already.
2085 xs_enable_swap(struct rpc_xprt *xprt)
2087 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2089 if (atomic_inc_return(&xprt->swapper) != 1)
2091 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2092 return -ERESTARTSYS;
2094 sk_set_memalloc(xs->inet);
2095 xprt_release_xprt(xprt, NULL);
2100 * xs_disable_swap - Untag this transport as being used for swap.
2101 * @xprt: transport to untag
2103 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2104 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2107 xs_disable_swap(struct rpc_xprt *xprt)
2109 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2111 if (!atomic_dec_and_test(&xprt->swapper))
2113 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2116 sk_clear_memalloc(xs->inet);
2117 xprt_release_xprt(xprt, NULL);
2120 static void xs_set_memalloc(struct rpc_xprt *xprt)
2125 xs_enable_swap(struct rpc_xprt *xprt)
2131 xs_disable_swap(struct rpc_xprt *xprt)
2136 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2138 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2140 if (!transport->inet) {
2141 struct sock *sk = sock->sk;
2143 write_lock_bh(&sk->sk_callback_lock);
2145 xs_save_old_callbacks(transport, sk);
2147 sk->sk_user_data = xprt;
2148 sk->sk_data_ready = xs_data_ready;
2149 sk->sk_write_space = xs_udp_write_space;
2150 sk->sk_allocation = GFP_NOIO;
2152 xprt_set_connected(xprt);
2154 /* Reset to new socket */
2155 transport->sock = sock;
2156 transport->inet = sk;
2158 xs_set_memalloc(xprt);
2160 write_unlock_bh(&sk->sk_callback_lock);
2162 xs_udp_do_set_buffer_size(xprt);
2165 static void xs_udp_setup_socket(struct work_struct *work)
2167 struct sock_xprt *transport =
2168 container_of(work, struct sock_xprt, connect_worker.work);
2169 struct rpc_xprt *xprt = &transport->xprt;
2170 struct socket *sock = transport->sock;
2173 sock = xs_create_sock(xprt, transport,
2174 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2175 IPPROTO_UDP, false);
2179 dprintk("RPC: worker connecting xprt %p via %s to "
2180 "%s (port %s)\n", xprt,
2181 xprt->address_strings[RPC_DISPLAY_PROTO],
2182 xprt->address_strings[RPC_DISPLAY_ADDR],
2183 xprt->address_strings[RPC_DISPLAY_PORT]);
2185 xs_udp_finish_connecting(xprt, sock);
2186 trace_rpc_socket_connect(xprt, sock, 0);
2189 xprt_unlock_connect(xprt, transport);
2190 xprt_clear_connecting(xprt);
2191 xprt_wake_pending_tasks(xprt, status);
2195 * xs_tcp_shutdown - gracefully shut down a TCP socket
2198 * Initiates a graceful shutdown of the TCP socket by calling the
2199 * equivalent of shutdown(SHUT_RDWR);
2201 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2203 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2204 struct socket *sock = transport->sock;
2208 if (xprt_connected(xprt)) {
2209 kernel_sock_shutdown(sock, SHUT_RDWR);
2210 trace_rpc_socket_shutdown(xprt, sock);
2212 xs_reset_transport(transport);
2215 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2217 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2218 int ret = -ENOTCONN;
2220 if (!transport->inet) {
2221 struct sock *sk = sock->sk;
2222 unsigned int keepidle = xprt->timeout->to_initval / HZ;
2223 unsigned int keepcnt = xprt->timeout->to_retries + 1;
2224 unsigned int opt_on = 1;
2227 /* TCP Keepalive options */
2228 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2229 (char *)&opt_on, sizeof(opt_on));
2230 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2231 (char *)&keepidle, sizeof(keepidle));
2232 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2233 (char *)&keepidle, sizeof(keepidle));
2234 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2235 (char *)&keepcnt, sizeof(keepcnt));
2237 /* TCP user timeout (see RFC5482) */
2238 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2239 (xprt->timeout->to_retries + 1);
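		/*
		 * Example (assuming the default TCP timeout values of
		 * to_initval = 60s and to_retries = 2): TCP_USER_TIMEOUT is
		 * set to 60000 * 3 = 180000 ms, i.e. the connection may stay
		 * unacknowledged for the full major timeout before TCP gives
		 * up.
		 */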
2240 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2241 (char *)&timeo, sizeof(timeo));
2243 write_lock_bh(&sk->sk_callback_lock);
2245 xs_save_old_callbacks(transport, sk);
2247 sk->sk_user_data = xprt;
2248 sk->sk_data_ready = xs_tcp_data_ready;
2249 sk->sk_state_change = xs_tcp_state_change;
2250 sk->sk_write_space = xs_tcp_write_space;
2251 sk->sk_error_report = xs_error_report;
2252 sk->sk_allocation = GFP_NOIO;
2254 /* socket options */
2255 sock_reset_flag(sk, SOCK_LINGER);
2256 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2258 xprt_clear_connected(xprt);
2260 /* Reset to new socket */
2261 transport->sock = sock;
2262 transport->inet = sk;
2264 write_unlock_bh(&sk->sk_callback_lock);
2267 if (!xprt_bound(xprt))
2270 xs_set_memalloc(xprt);
2272 /* Tell the socket layer to start connecting... */
2273 xprt->stat.connect_count++;
2274 xprt->stat.connect_start = jiffies;
2275 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2276 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2279 xs_set_srcport(transport, sock);
2282 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2283 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2290 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2292 * Invoked from a work queue.
2294 static void xs_tcp_setup_socket(struct work_struct *work)
2296 struct sock_xprt *transport =
2297 container_of(work, struct sock_xprt, connect_worker.work);
2298 struct socket *sock = transport->sock;
2299 struct rpc_xprt *xprt = &transport->xprt;
2303 sock = xs_create_sock(xprt, transport,
2304 xs_addr(xprt)->sa_family, SOCK_STREAM,
2307 status = PTR_ERR(sock);
2312 dprintk("RPC: worker connecting xprt %p via %s to "
2313 "%s (port %s)\n", xprt,
2314 xprt->address_strings[RPC_DISPLAY_PROTO],
2315 xprt->address_strings[RPC_DISPLAY_ADDR],
2316 xprt->address_strings[RPC_DISPLAY_PORT]);
2318 status = xs_tcp_finish_connecting(xprt, sock);
2319 trace_rpc_socket_connect(xprt, sock, status);
2320 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2321 xprt, -status, xprt_connected(xprt),
2322 sock->sk->sk_state);
2325 printk("%s: connect returned unhandled error %d\n",
2327 case -EADDRNOTAVAIL:
2328 /* We're probably in TIME_WAIT. Get rid of existing socket,
2331 xs_tcp_force_close(xprt);
2336 xprt_unlock_connect(xprt, transport);
2339 /* Happens, for instance, if the user specified a link
2340 * local IPv6 address without a scope-id.
2347 /* retry with existing socket, after a delay */
2348 xs_tcp_force_close(xprt);
2353 xprt_unlock_connect(xprt, transport);
2354 xprt_clear_connecting(xprt);
2355 xprt_wake_pending_tasks(xprt, status);
2359 * xs_connect - connect a socket to a remote endpoint
2360 * @xprt: pointer to transport structure
2361 * @task: address of RPC task that manages state of connect request
2363 * TCP: If the remote end dropped the connection, delay reconnecting.
2365 * UDP socket connects are synchronous, but we use a work queue anyway
2366 * to guarantee that even unprivileged user processes can set up a
2367 * socket on a privileged port.
2369 * If a UDP socket connect fails, the delay behavior here prevents
2370 * retry floods (hard mounts).
2372 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2374 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2376 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2378 if (transport->sock != NULL) {
2379 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2381 xprt, xprt->reestablish_timeout / HZ);
2383 /* Start by resetting any existing state */
2384 xs_reset_transport(transport);
2386 queue_delayed_work(rpciod_workqueue,
2387 &transport->connect_worker,
2388 xprt->reestablish_timeout);
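/*
 * Exponential backoff for the next reconnect attempt: the timeout
 * doubles on every pass and is clamped to the
 * [XS_TCP_INIT_REEST_TO, XS_TCP_MAX_REEST_TO] range (with the usual
 * definitions these are roughly 3 seconds and 5 minutes, though the
 * exact values are assumptions here).
 */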
2389 xprt->reestablish_timeout <<= 1;
2390 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2391 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2392 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2393 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2395 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2396 queue_delayed_work(rpciod_workqueue,
2397 &transport->connect_worker, 0);
2402 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2403 * @xprt: rpc_xprt struct containing statistics
2407 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2411 if (xprt_connected(xprt))
2412 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2414 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2415 "%llu %llu %lu %llu %llu\n",
2416 xprt->stat.bind_count,
2417 xprt->stat.connect_count,
2418 xprt->stat.connect_time,
2422 xprt->stat.bad_xids,
2425 xprt->stat.max_slots,
2426 xprt->stat.sending_u,
2427 xprt->stat.pending_u);
2431 * xs_udp_print_stats - display UDP socket-specific stats
2432 * @xprt: rpc_xprt struct containing statistics
2436 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2438 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2440 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2443 xprt->stat.bind_count,
2446 xprt->stat.bad_xids,
2449 xprt->stat.max_slots,
2450 xprt->stat.sending_u,
2451 xprt->stat.pending_u);
2455 * xs_tcp_print_stats - display TCP socket-specific stats
2456 * @xprt: rpc_xprt struct containing statistics
2460 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2462 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2465 if (xprt_connected(xprt))
2466 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2468 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2469 "%llu %llu %lu %llu %llu\n",
2471 xprt->stat.bind_count,
2472 xprt->stat.connect_count,
2473 xprt->stat.connect_time,
2477 xprt->stat.bad_xids,
2480 xprt->stat.max_slots,
2481 xprt->stat.sending_u,
2482 xprt->stat.pending_u);
2486 * Allocate a page-backed scratch buffer for the rpc code. The reason we
2487 * allocate pages instead of doing a kmalloc like rpc_malloc does is that we
2488 * want to use the server-side send routines.
2490 static void *bc_malloc(struct rpc_task *task, size_t size)
2493 struct rpc_buffer *buf;
2495 WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2496 if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2499 page = alloc_page(GFP_KERNEL);
2503 buf = page_address(page);
2504 buf->len = PAGE_SIZE;
2510 * Free the space allocated by the bc_malloc routine
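* The rpc_buffer header sits at the start of the page and the pointer
* handed out to callers is buf->data, so container_of() below recovers
* the page-aligned rpc_buffer that free_page() expects.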
2512 static void bc_free(void *buffer)
2514 struct rpc_buffer *buf;
2519 buf = container_of(buffer, struct rpc_buffer, data);
2520 free_page((unsigned long)buf);
2524 * Use the svc_sock to send the callback. Must be called with the svc_xprt's
2525 * xpt_mutex held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2527 static int bc_sendto(struct rpc_rqst *req)
2530 struct xdr_buf *xbufp = &req->rq_snd_buf;
2531 struct rpc_xprt *xprt = req->rq_xprt;
2532 struct sock_xprt *transport =
2533 container_of(xprt, struct sock_xprt, xprt);
2534 struct socket *sock = transport->sock;
2535 unsigned long headoff;
2536 unsigned long tailoff;
2538 xs_encode_stream_record_marker(xbufp);
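/*
 * The head and tail iovecs are not necessarily page aligned, so work out
 * their offsets within their pages before handing them to
 * svc_send_common() below.
 */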
2540 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2541 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2542 len = svc_send_common(sock, xbufp,
2543 virt_to_page(xbufp->head[0].iov_base), headoff,
2544 xbufp->tail[0].iov_base, tailoff);
2546 if (len != xbufp->len) {
2547 printk(KERN_NOTICE "Error sending entire callback!\n");
2555 * The send routine. Borrows from svc_send
2557 static int bc_send_request(struct rpc_task *task)
2559 struct rpc_rqst *req = task->tk_rqstp;
2560 struct svc_xprt *xprt;
2563 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2565 * Get the server socket associated with this callback xprt
2567 xprt = req->rq_xprt->bc_xprt;
2570 * Grab the mutex to serialize data as the connection is shared
2571 * with the fore channel
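* Rather than block on the mutex, the code below tries it; on contention
* the task is queued on xpt_bc_pending and the lock is tried once more,
* and if that second attempt also fails the current holder will wake the
* queued task when it releases the mutex.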
2573 if (!mutex_trylock(&xprt->xpt_mutex)) {
2574 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2575 if (!mutex_trylock(&xprt->xpt_mutex))
2577 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2579 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2582 len = bc_sendto(req);
2583 mutex_unlock(&xprt->xpt_mutex);
2592 * The close routine. Since this is client initiated, we do nothing
2595 static void bc_close(struct rpc_xprt *xprt)
2600 * The xprt destroy routine. Again, because this connection is client
2601 * initiated, we do nothing
2604 static void bc_destroy(struct rpc_xprt *xprt)
2606 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2609 module_put(THIS_MODULE);
2612 static struct rpc_xprt_ops xs_local_ops = {
2613 .reserve_xprt = xprt_reserve_xprt,
2614 .release_xprt = xs_tcp_release_xprt,
2615 .alloc_slot = xprt_alloc_slot,
2616 .rpcbind = xs_local_rpcbind,
2617 .set_port = xs_local_set_port,
2618 .connect = xs_local_connect,
2619 .buf_alloc = rpc_malloc,
2620 .buf_free = rpc_free,
2621 .send_request = xs_local_send_request,
2622 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2624 .destroy = xs_destroy,
2625 .print_stats = xs_local_print_stats,
2626 .enable_swap = xs_enable_swap,
2627 .disable_swap = xs_disable_swap,
2630 static struct rpc_xprt_ops xs_udp_ops = {
2631 .set_buffer_size = xs_udp_set_buffer_size,
2632 .reserve_xprt = xprt_reserve_xprt_cong,
2633 .release_xprt = xprt_release_xprt_cong,
2634 .alloc_slot = xprt_alloc_slot,
2635 .rpcbind = rpcb_getport_async,
2636 .set_port = xs_set_port,
2637 .connect = xs_connect,
2638 .buf_alloc = rpc_malloc,
2639 .buf_free = rpc_free,
2640 .send_request = xs_udp_send_request,
2641 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2642 .timer = xs_udp_timer,
2643 .release_request = xprt_release_rqst_cong,
2645 .destroy = xs_destroy,
2646 .print_stats = xs_udp_print_stats,
2647 .enable_swap = xs_enable_swap,
2648 .disable_swap = xs_disable_swap,
2649 .inject_disconnect = xs_inject_disconnect,
2652 static struct rpc_xprt_ops xs_tcp_ops = {
2653 .reserve_xprt = xprt_reserve_xprt,
2654 .release_xprt = xs_tcp_release_xprt,
2655 .alloc_slot = xprt_lock_and_alloc_slot,
2656 .rpcbind = rpcb_getport_async,
2657 .set_port = xs_set_port,
2658 .connect = xs_connect,
2659 .buf_alloc = rpc_malloc,
2660 .buf_free = rpc_free,
2661 .send_request = xs_tcp_send_request,
2662 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2663 .close = xs_tcp_shutdown,
2664 .destroy = xs_destroy,
2665 .print_stats = xs_tcp_print_stats,
2666 .enable_swap = xs_enable_swap,
2667 .disable_swap = xs_disable_swap,
2668 .inject_disconnect = xs_inject_disconnect,
2669 #ifdef CONFIG_SUNRPC_BACKCHANNEL
2670 .bc_setup = xprt_setup_bc,
2671 .bc_up = xs_tcp_bc_up,
2672 .bc_free_rqst = xprt_free_bc_rqst,
2673 .bc_destroy = xprt_destroy_bc,
2678 * The rpc_xprt_ops for the server backchannel
2681 static struct rpc_xprt_ops bc_tcp_ops = {
2682 .reserve_xprt = xprt_reserve_xprt,
2683 .release_xprt = xprt_release_xprt,
2684 .alloc_slot = xprt_alloc_slot,
2685 .buf_alloc = bc_malloc,
2686 .buf_free = bc_free,
2687 .send_request = bc_send_request,
2688 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2690 .destroy = bc_destroy,
2691 .print_stats = xs_tcp_print_stats,
2692 .enable_swap = xs_enable_swap,
2693 .disable_swap = xs_disable_swap,
2694 .inject_disconnect = xs_inject_disconnect,
2697 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2699 static const struct sockaddr_in sin = {
2700 .sin_family = AF_INET,
2701 .sin_addr.s_addr = htonl(INADDR_ANY),
2703 static const struct sockaddr_in6 sin6 = {
2704 .sin6_family = AF_INET6,
2705 .sin6_addr = IN6ADDR_ANY_INIT,
2712 memcpy(sap, &sin, sizeof(sin));
2715 memcpy(sap, &sin6, sizeof(sin6));
2718 dprintk("RPC: %s: Bad address family\n", __func__);
2719 return -EAFNOSUPPORT;
2724 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2725 unsigned int slot_table_size,
2726 unsigned int max_slot_table_size)
2728 struct rpc_xprt *xprt;
2729 struct sock_xprt *new;
2731 if (args->addrlen > sizeof(xprt->addr)) {
2732 dprintk("RPC: xs_setup_xprt: address too large\n");
2733 return ERR_PTR(-EBADF);
2736 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2737 max_slot_table_size);
2739 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2741 return ERR_PTR(-ENOMEM);
2744 new = container_of(xprt, struct sock_xprt, xprt);
2745 mutex_init(&new->recv_mutex);
2746 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2747 xprt->addrlen = args->addrlen;
2749 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2752 err = xs_init_anyaddr(args->dstaddr->sa_family,
2753 (struct sockaddr *)&new->srcaddr);
2756 return ERR_PTR(err);
2763 static const struct rpc_timeout xs_local_default_timeout = {
2764 .to_initval = 10 * HZ,
2765 .to_maxval = 10 * HZ,
2770 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2771 * @args: rpc transport creation arguments
2773 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
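* For example, rpcbind conventionally exposes an AF_LOCAL socket such as
* /var/run/rpcbind.sock (path shown for illustration only); the only
* requirement enforced below is that sun_path is an absolute path.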
2775 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2777 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2778 struct sock_xprt *transport;
2779 struct rpc_xprt *xprt;
2780 struct rpc_xprt *ret;
2782 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2783 xprt_max_tcp_slot_table_entries);
2786 transport = container_of(xprt, struct sock_xprt, xprt);
2789 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2790 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2792 xprt->bind_timeout = XS_BIND_TO;
2793 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2794 xprt->idle_timeout = XS_IDLE_DISC_TO;
2796 xprt->ops = &xs_local_ops;
2797 xprt->timeout = &xs_local_default_timeout;
2799 INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn);
2800 INIT_DELAYED_WORK(&transport->connect_worker,
2801 xs_dummy_setup_socket);
2803 switch (sun->sun_family) {
2805 if (sun->sun_path[0] != '/') {
2806 dprintk("RPC: bad AF_LOCAL address: %s\n",
2808 ret = ERR_PTR(-EINVAL);
2811 xprt_set_bound(xprt);
2812 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2813 ret = ERR_PTR(xs_local_setup_socket(transport));
2818 ret = ERR_PTR(-EAFNOSUPPORT);
2822 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2823 xprt->address_strings[RPC_DISPLAY_ADDR]);
2825 if (try_module_get(THIS_MODULE))
2827 ret = ERR_PTR(-EINVAL);
2833 static const struct rpc_timeout xs_udp_default_timeout = {
2834 .to_initval = 5 * HZ,
2835 .to_maxval = 30 * HZ,
2836 .to_increment = 5 * HZ,
2841 * xs_setup_udp - Set up transport to use a UDP socket
2842 * @args: rpc transport creation arguments
2845 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2847 struct sockaddr *addr = args->dstaddr;
2848 struct rpc_xprt *xprt;
2849 struct sock_xprt *transport;
2850 struct rpc_xprt *ret;
2852 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2853 xprt_udp_slot_table_entries);
2856 transport = container_of(xprt, struct sock_xprt, xprt);
2858 xprt->prot = IPPROTO_UDP;
2860 /* XXX: header size can vary due to auth type, IPv6, etc. */
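/*
 * Rough arithmetic: a UDP datagram carries at most about 64 KiB, and
 * (MAX_HEADER << 3) reserves eight times the worst-case header estimate
 * as slack for the variable-length RPC and auth headers.
 */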
2861 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2863 xprt->bind_timeout = XS_BIND_TO;
2864 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2865 xprt->idle_timeout = XS_IDLE_DISC_TO;
2867 xprt->ops = &xs_udp_ops;
2869 xprt->timeout = &xs_udp_default_timeout;
2871 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2872 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
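/*
 * A non-zero destination port means the caller already knows where the
 * service lives, so mark the transport bound; a zero port leaves it
 * unbound and the usual rpcbind query fills it in later (autobind).
 */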
2874 switch (addr->sa_family) {
2876 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2877 xprt_set_bound(xprt);
2879 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2882 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2883 xprt_set_bound(xprt);
2885 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2888 ret = ERR_PTR(-EAFNOSUPPORT);
2892 if (xprt_bound(xprt))
2893 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2894 xprt->address_strings[RPC_DISPLAY_ADDR],
2895 xprt->address_strings[RPC_DISPLAY_PORT],
2896 xprt->address_strings[RPC_DISPLAY_PROTO]);
2898 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2899 xprt->address_strings[RPC_DISPLAY_ADDR],
2900 xprt->address_strings[RPC_DISPLAY_PROTO]);
2902 if (try_module_get(THIS_MODULE))
2904 ret = ERR_PTR(-EINVAL);
2910 static const struct rpc_timeout xs_tcp_default_timeout = {
2911 .to_initval = 60 * HZ,
2912 .to_maxval = 60 * HZ,
2917 * xs_setup_tcp - Set up transport to use a TCP socket
2918 * @args: rpc transport creation arguments
2921 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2923 struct sockaddr *addr = args->dstaddr;
2924 struct rpc_xprt *xprt;
2925 struct sock_xprt *transport;
2926 struct rpc_xprt *ret;
2927 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2929 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2930 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2932 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2933 max_slot_table_size);
2936 transport = container_of(xprt, struct sock_xprt, xprt);
2938 xprt->prot = IPPROTO_TCP;
2939 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2940 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2942 xprt->bind_timeout = XS_BIND_TO;
2943 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2944 xprt->idle_timeout = XS_IDLE_DISC_TO;
2946 xprt->ops = &xs_tcp_ops;
2947 xprt->timeout = &xs_tcp_default_timeout;
2949 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
2950 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2952 switch (addr->sa_family) {
2954 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2955 xprt_set_bound(xprt);
2957 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2960 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2961 xprt_set_bound(xprt);
2963 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2966 ret = ERR_PTR(-EAFNOSUPPORT);
2970 if (xprt_bound(xprt))
2971 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2972 xprt->address_strings[RPC_DISPLAY_ADDR],
2973 xprt->address_strings[RPC_DISPLAY_PORT],
2974 xprt->address_strings[RPC_DISPLAY_PROTO]);
2976 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2977 xprt->address_strings[RPC_DISPLAY_ADDR],
2978 xprt->address_strings[RPC_DISPLAY_PROTO]);
2980 if (try_module_get(THIS_MODULE))
2982 ret = ERR_PTR(-EINVAL);
2989 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2990 * @args: rpc transport creation arguments
2993 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2995 struct sockaddr *addr = args->dstaddr;
2996 struct rpc_xprt *xprt;
2997 struct sock_xprt *transport;
2998 struct svc_sock *bc_sock;
2999 struct rpc_xprt *ret;
3001 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3002 xprt_tcp_slot_table_entries);
3005 transport = container_of(xprt, struct sock_xprt, xprt);
3007 xprt->prot = IPPROTO_TCP;
3008 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3009 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3010 xprt->timeout = &xs_tcp_default_timeout;
3013 xprt_set_bound(xprt);
3014 xprt->bind_timeout = 0;
3015 xprt->reestablish_timeout = 0;
3016 xprt->idle_timeout = 0;
3018 xprt->ops = &bc_tcp_ops;
3020 switch (addr->sa_family) {
3022 xs_format_peer_addresses(xprt, "tcp",
3026 xs_format_peer_addresses(xprt, "tcp",
3027 RPCBIND_NETID_TCP6);
3030 ret = ERR_PTR(-EAFNOSUPPORT);
3034 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3035 xprt->address_strings[RPC_DISPLAY_ADDR],
3036 xprt->address_strings[RPC_DISPLAY_PORT],
3037 xprt->address_strings[RPC_DISPLAY_PROTO]);
3040 * Once we've associated a backchannel xprt with a connection,
3041 * we want to keep it around as long as the connection lasts,
3042 * in case we need to start using it for a backchannel again;
3043 * this reference won't be dropped until bc_xprt is destroyed.
3046 args->bc_xprt->xpt_bc_xprt = xprt;
3047 xprt->bc_xprt = args->bc_xprt;
3048 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3049 transport->sock = bc_sock->sk_sock;
3050 transport->inet = bc_sock->sk_sk;
3053 * Since we don't want connections for the backchannel, we set
3054 * the xprt status to connected
3056 xprt_set_connected(xprt);
3058 if (try_module_get(THIS_MODULE))
3061 args->bc_xprt->xpt_bc_xprt = NULL;
3063 ret = ERR_PTR(-EINVAL);
3069 static struct xprt_class xs_local_transport = {
3070 .list = LIST_HEAD_INIT(xs_local_transport.list),
3071 .name = "named UNIX socket",
3072 .owner = THIS_MODULE,
3073 .ident = XPRT_TRANSPORT_LOCAL,
3074 .setup = xs_setup_local,
3077 static struct xprt_class xs_udp_transport = {
3078 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3080 .owner = THIS_MODULE,
3081 .ident = XPRT_TRANSPORT_UDP,
3082 .setup = xs_setup_udp,
3085 static struct xprt_class xs_tcp_transport = {
3086 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3088 .owner = THIS_MODULE,
3089 .ident = XPRT_TRANSPORT_TCP,
3090 .setup = xs_setup_tcp,
3093 static struct xprt_class xs_bc_tcp_transport = {
3094 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3095 .name = "tcp NFSv4.1 backchannel",
3096 .owner = THIS_MODULE,
3097 .ident = XPRT_TRANSPORT_BC_TCP,
3098 .setup = xs_setup_bc_tcp,
3102 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3105 int init_socket_xprt(void)
3107 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3108 if (!sunrpc_table_header)
3109 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3112 xprt_register_transport(&xs_local_transport);
3113 xprt_register_transport(&xs_udp_transport);
3114 xprt_register_transport(&xs_tcp_transport);
3115 xprt_register_transport(&xs_bc_tcp_transport);
3121 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3124 void cleanup_socket_xprt(void)
3126 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3127 if (sunrpc_table_header) {
3128 unregister_sysctl_table(sunrpc_table_header);
3129 sunrpc_table_header = NULL;
3133 xprt_unregister_transport(&xs_local_transport);
3134 xprt_unregister_transport(&xs_udp_transport);
3135 xprt_unregister_transport(&xs_tcp_transport);
3136 xprt_unregister_transport(&xs_bc_tcp_transport);
3139 static int param_set_uint_minmax(const char *val,
3140 const struct kernel_param *kp,
3141 unsigned int min, unsigned int max)
3148 ret = kstrtouint(val, 0, &num);
3149 if (ret == -EINVAL || num < min || num > max)
3151 *((unsigned int *)kp->arg) = num;
3155 static int param_set_portnr(const char *val, const struct kernel_param *kp)
3157 return param_set_uint_minmax(val, kp,
3162 static const struct kernel_param_ops param_ops_portnr = {
3163 .set = param_set_portnr,
3164 .get = param_get_uint,
3167 #define param_check_portnr(name, p) \
3168 __param_check(name, p, unsigned int);
3170 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3171 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
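/*
 * Example usage (illustrative; assumes these options end up in the
 * sunrpc module):
 *
 *	modprobe sunrpc min_resvport=650 max_resvport=1023
 *
 * or, at runtime:
 *
 *	echo 650 > /sys/module/sunrpc/parameters/min_resvport
 */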
3173 static int param_set_slot_table_size(const char *val,
3174 const struct kernel_param *kp)
3176 return param_set_uint_minmax(val, kp,
3178 RPC_MAX_SLOT_TABLE);
3181 static const struct kernel_param_ops param_ops_slot_table_size = {
3182 .set = param_set_slot_table_size,
3183 .get = param_get_uint,
3186 #define param_check_slot_table_size(name, p) \
3187 __param_check(name, p, unsigned int);
3189 static int param_set_max_slot_table_size(const char *val,
3190 const struct kernel_param *kp)
3192 return param_set_uint_minmax(val, kp,
3194 RPC_MAX_SLOT_TABLE_LIMIT);
3197 static const struct kernel_param_ops param_ops_max_slot_table_size = {
3198 .set = param_set_max_slot_table_size,
3199 .get = param_get_uint,
3202 #define param_check_max_slot_table_size(name, p) \
3203 __param_check(name, p, unsigned int);
3205 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3206 slot_table_size, 0644);
3207 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3208 max_slot_table_size, 0644);
3209 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3210 slot_table_size, 0644);
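/*
 * Example usage (illustrative; same assumption about the sunrpc module
 * as above):
 *
 *	echo 128 > /sys/module/sunrpc/parameters/tcp_slot_table_entries
 *
 * Values above RPC_MAX_SLOT_TABLE are rejected by
 * param_set_slot_table_size() above.
 */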