/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#include "socklnd.h"

int
ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
{
	int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
				   &conn->ksnc_port);

	/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
	LASSERT(!conn->ksnc_closing);

	if (rc) {
		CERROR("Error %d getting sock peer IP\n", rc);
		return rc;
	}

	rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
	if (rc) {
		CERROR("Error %d getting sock local IP\n", rc);
		return rc;
	}

	return 0;
}
int
ksocknal_lib_zc_capable(struct ksock_conn *conn)
{
	int caps = conn->ksnc_sock->sk->sk_route_caps;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x)
		return 0;

	/*
	 * ZC if the socket supports scatter/gather and doesn't need software
	 * checksums
	 */
	return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
}
int
ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	struct socket *sock = conn->ksnc_sock;
	int nob;
	int rc;

	if (*ksocknal_tunables.ksnd_enable_csum &&	  /* checksum enabled */
	    conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
	    tx->tx_nob == tx->tx_resid &&		  /* first send */
	    !tx->tx_msg.ksm_csum)			  /* not checksummed */
		ksocknal_lib_csum_tx(tx);

	/*
	 * NB we can't trust socket ops to either consume our iovs
	 * or leave them alone.
	 */
	{
#if SOCKNAL_SINGLE_FRAG_TX
		struct kvec scratch;
		struct kvec *scratchiov = &scratch;
		unsigned int niov = 1;
#else
		struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
		unsigned int niov = tx->tx_niov;
#endif
		struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
		int i;

		for (nob = i = 0; i < niov; i++) {
			scratchiov[i] = tx->tx_iov[i];
			nob += scratchiov[i].iov_len;
		}

		if (!list_empty(&conn->ksnc_tx_queue) ||
		    nob < tx->tx_resid)		/* more is coming */
			msg.msg_flags |= MSG_MORE;

		rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
	}
	return rc;
}
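
/*
 * Aside (illustrative only): the MSG_MORE decision above has a direct
 * user-space counterpart.  The compiled-out sketch below, with a
 * hypothetical demo_send_more() that is not part of this LND, shows the
 * same "partial segment, more to come" hint applied to plain sendmsg(2).
 */
#if 0	/* illustrative sketch, not built */
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t demo_send_more(int fd, const struct iovec *iov, int niov,
			      int more_coming)
{
	struct msghdr msg = {
		.msg_iov = (struct iovec *)iov,
		.msg_iovlen = niov,
	};

	/* MSG_MORE: don't push a partial segment, more data follows */
	return sendmsg(fd, &msg, MSG_DONTWAIT | (more_coming ? MSG_MORE : 0));
}
#endif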
int
ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	struct socket *sock = conn->ksnc_sock;
	lnet_kiov_t *kiov = tx->tx_kiov;
	int rc;
	int nob;

	/* Not NOOP message */
	LASSERT(tx->tx_lnetmsg);

	/*
	 * NB we can't trust socket ops to either consume our iovs
	 * or leave them alone.
	 */
	if (tx->tx_msg.ksm_zc_cookies[0]) {
		/* Zero copy is enabled */
		struct sock *sk = sock->sk;
		struct page *page = kiov->kiov_page;
		int offset = kiov->kiov_offset;
		int fragsize = kiov->kiov_len;
		int msgflg = MSG_DONTWAIT;

		CDEBUG(D_NET, "page %p + offset %x for %d\n",
		       page, offset, kiov->kiov_len);

		if (!list_empty(&conn->ksnc_tx_queue) ||
		    fragsize < tx->tx_resid)
			msgflg |= MSG_MORE;

		if (sk->sk_prot->sendpage) {
			rc = sk->sk_prot->sendpage(sk, page,
						   offset, fragsize, msgflg);
		} else {
			rc = tcp_sendpage(sk, page, offset, fragsize, msgflg);
		}
	} else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
		struct kvec scratch;
		struct kvec *scratchiov = &scratch;
		unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
		struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
		unsigned int niov = tx->tx_nkiov;
#endif
		struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
		int i;

		for (nob = i = 0; i < niov; i++) {
			scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
						 kiov[i].kiov_offset;
			nob += scratchiov[i].iov_len = kiov[i].kiov_len;
		}

		if (!list_empty(&conn->ksnc_tx_queue) ||
		    nob < tx->tx_resid)		/* more is coming */
			msg.msg_flags |= MSG_MORE;

		rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);

		for (i = 0; i < niov; i++)
			kunmap(kiov[i].kiov_page);
	}
	return rc;
}
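
/*
 * Aside (illustrative only): the in-kernel sendpage() path above hands
 * payload pages to the transport without copying them through a socket
 * buffer.  Its nearest user-space relative is sendfile(2); the
 * hypothetical demo_zc_send() below (not part of this LND, compiled out)
 * assumes a connected socket and a regular file.
 */
#if 0	/* illustrative sketch, not built */
#include <sys/sendfile.h>
#include <sys/types.h>

static ssize_t demo_zc_send(int sock_fd, int file_fd, off_t *off, size_t nob)
{
	/* like sendpage(), the payload never crosses a user buffer */
	return sendfile(sock_fd, file_fd, off, nob);
}
#endif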
void
ksocknal_lib_eager_ack(struct ksock_conn *conn)
{
	int opt = 1;
	struct socket *sock = conn->ksnc_sock;

	/*
	 * Remind the socket to ACK eagerly.  If I don't, the socket might
	 * think I'm about to send something it could piggy-back the ACK
	 * on, introducing delay in completing zero-copy sends in my
	 * peer.
	 */
	kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt,
			  sizeof(opt));
}
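
/*
 * Aside (illustrative only): the same eager-ACK hint is available from
 * user space.  The hypothetical demo_quickack() below (not part of this
 * LND, compiled out) assumes an already-connected TCP descriptor.
 */
#if 0	/* illustrative sketch, not built */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int demo_quickack(int fd)
{
	int opt = 1;

	/*
	 * TCP_QUICKACK is not permanent: the kernel may fall back to
	 * delayed ACKs, so callers typically re-arm it after each recv().
	 */
	return setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &opt, sizeof(opt));
}
#endif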
int
ksocknal_lib_recv_iov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
	struct kvec scratch;
	struct kvec *scratchiov = &scratch;
	unsigned int niov = 1;
#else
	struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
	unsigned int niov = conn->ksnc_rx_niov;
#endif
	struct kvec *iov = conn->ksnc_rx_iov;
	struct msghdr msg = {
		.msg_flags = 0
	};
	int nob;
	int i;
	int rc;
	int fragnob;
	int sum;
	__u32 saved_csum;

	/*
	 * NB we can't trust socket ops to either consume our iovs
	 * or leave them alone.
	 */
	LASSERT(niov > 0);

	for (nob = i = 0; i < niov; i++) {
		scratchiov[i] = iov[i];
		nob += scratchiov[i].iov_len;
	}
	LASSERT(nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
			    MSG_DONTWAIT);

	saved_csum = 0;
	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
		saved_csum = conn->ksnc_msg.ksm_csum;
		conn->ksnc_msg.ksm_csum = 0;
	}

	if (saved_csum) {
		/* accumulate checksum */
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT(i < niov);

			fragnob = iov[i].iov_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   iov[i].iov_base,
							   fragnob);
		}
		conn->ksnc_msg.ksm_csum = saved_csum;
	}

	return rc;
}
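
/*
 * Aside (illustrative only): the checksum loop above must cope with a
 * short read that ends mid-fragment.  The compiled-out sketch below
 * isolates that clamping logic; demo_csum() is a simple additive
 * stand-in for ksocknal_csum() and both demo_* names are hypothetical.
 */
#if 0	/* illustrative sketch, not built */
#include <stddef.h>
#include <sys/uio.h>

static unsigned int demo_csum(unsigned int csum, const void *p, size_t nob)
{
	const unsigned char *c = p;

	while (nob-- > 0)
		csum += *c++;
	return csum;
}

/* checksum only the 'rcvd' bytes that actually arrived */
static unsigned int demo_csum_iov(const struct iovec *iov, int niov,
				  size_t rcvd)
{
	unsigned int csum = ~0U;
	size_t fragnob;
	int i;

	for (i = 0; i < niov && rcvd > 0; i++, rcvd -= fragnob) {
		fragnob = iov[i].iov_len;
		if (fragnob > rcvd)
			fragnob = rcvd;	/* the short read ended here */
		csum = demo_csum(csum, iov[i].iov_base, fragnob);
	}
	return csum;
}
#endif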
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
	if (!addr)
		return;

	vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
		       struct kvec *iov, struct page **pages)
{
	void *addr;
	int nob;
	int i;

	if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
		return NULL;

	LASSERT(niov <= LNET_MAX_IOV);

	if (niov < 2 ||
	    niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
		return NULL;

	/*
	 * Only a contiguous run of pages can be mapped: every fragment
	 * except the first must start at offset 0, and every fragment
	 * except the last must end exactly on a page boundary.
	 */
	for (nob = i = 0; i < niov; i++) {
		if ((kiov[i].kiov_offset && i > 0) ||
		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE &&
		     i < niov - 1))
			return NULL;

		pages[i] = kiov[i].kiov_page;
		nob += kiov[i].kiov_len;
	}

	addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
	if (!addr)
		return NULL;

	iov->iov_base = addr + kiov[0].kiov_offset;
	iov->iov_len = nob;

	return addr;
}
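
/*
 * Aside (illustrative only): the mappability rule enforced above,
 * extracted into a pure predicate.  'struct demo_frag' and
 * demo_frags_mappable() are hypothetical stand-ins for lnet_kiov_t and
 * the inline check; compiled out.
 */
#if 0	/* illustrative sketch, not built */
struct demo_frag {
	unsigned int offset;	/* offset within the page */
	unsigned int len;	/* bytes used in the page */
};

/*
 * A fragment list maps to one virtually-contiguous region only if every
 * fragment but the first starts at offset 0 and every fragment but the
 * last ends exactly on a page boundary.
 */
static int demo_frags_mappable(const struct demo_frag *frag, int nfrag,
			       unsigned int page_size)
{
	int i;

	for (i = 0; i < nfrag; i++) {
		if (i > 0 && frag[i].offset)
			return 0;
		if (i < nfrag - 1 &&
		    frag[i].offset + frag[i].len != page_size)
			return 0;
	}
	return 1;
}
#endif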
int
ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
	struct kvec scratch;
	struct kvec *scratchiov = &scratch;
	struct page **pages = NULL;
	unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
	struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
	struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
	unsigned int niov = conn->ksnc_rx_nkiov;
#endif
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
	struct msghdr msg = {
		.msg_flags = 0
	};
	int nob;
	int i;
	int rc;
	void *base;
	void *addr;
	int sum;
	int fragnob;
	int n;

	/*
	 * NB we can't trust socket ops to either consume our iovs
	 * or leave them alone.
	 */
	addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
	if (addr) {
		/* all fragments mapped into one contiguous kvec */
		nob = scratchiov[0].iov_len;
		n = 1;
	} else {
		for (nob = i = 0; i < niov; i++) {
			nob += scratchiov[i].iov_len = kiov[i].kiov_len;
			scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
						 kiov[i].kiov_offset;
		}
		n = niov;
	}

	LASSERT(nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov,
			    n, nob, MSG_DONTWAIT);

	if (conn->ksnc_msg.ksm_csum) {
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT(i < niov);

			/*
			 * Dang! have to kmap again because I have nowhere to
			 * stash the mapped address.  But by doing it while the
			 * page is still mapped, the kernel just bumps the map
			 * count and returns me the address it stashed.
			 */
			base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
			fragnob = kiov[i].kiov_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   base, fragnob);

			kunmap(kiov[i].kiov_page);
		}
	}

	if (addr) {
		ksocknal_lib_kiov_vunmap(addr);
	} else {
		for (i = 0; i < niov; i++)
			kunmap(kiov[i].kiov_page);
	}

	return rc;
}
void
ksocknal_lib_csum_tx(struct ksock_tx *tx)
{
	int i;
	__u32 csum;
	void *base;

	LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
	LASSERT(tx->tx_conn);
	LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

	tx->tx_msg.ksm_csum = 0;

	csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
			     tx->tx_iov[0].iov_len);

	if (tx->tx_kiov) {
		for (i = 0; i < tx->tx_nkiov; i++) {
			base = kmap(tx->tx_kiov[i].kiov_page) +
			       tx->tx_kiov[i].kiov_offset;

			csum = ksocknal_csum(csum, base,
					     tx->tx_kiov[i].kiov_len);

			kunmap(tx->tx_kiov[i].kiov_page);
		}
	} else {
		for (i = 1; i < tx->tx_niov; i++)
			csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
					     tx->tx_iov[i].iov_len);
	}

	if (*ksocknal_tunables.ksnd_inject_csum_error) {
		csum++;	/* corrupt the checksum once, for testing */
		*ksocknal_tunables.ksnd_inject_csum_error = 0;
	}

	tx->tx_msg.ksm_csum = csum;
}
int
ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
			       int *rxmem, int *nagle)
{
	struct socket *sock = conn->ksnc_sock;
	int len;
	int rc;

	rc = ksocknal_connsock_addref(conn);
	if (rc) {
		LASSERT(conn->ksnc_closing);
		*txmem = *rxmem = *nagle = 0;
		return -ESHUTDOWN;
	}

	rc = lnet_sock_getbuf(sock, txmem, rxmem);
	if (!rc) {
		len = sizeof(*nagle);
		rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
				       (char *)nagle, &len);
	}

	ksocknal_connsock_decref(conn);

	if (!rc)
		*nagle = !*nagle;	/* report Nagle, not no-Nagle */
	else
		*txmem = *rxmem = *nagle = 0;

	return rc;
}
int
ksocknal_lib_setup_sock(struct socket *sock)
{
	int rc;
	int option;
	int keep_idle;
	int keep_intvl;
	int keep_count;
	int do_keepalive;
	struct linger linger;

	sock->sk->sk_allocation = GFP_NOFS;

	/*
	 * Ensure this socket aborts active sends immediately when we close
	 * it.
	 */
	linger.l_onoff = 0;
	linger.l_linger = 0;

	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
			       sizeof(linger));
	if (rc) {
		CERROR("Can't set SO_LINGER: %d\n", rc);
		return rc;
	}

	option = -1;
	rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
			       sizeof(option));
	if (rc) {
		CERROR("Can't set TCP_LINGER2: %d\n", rc);
		return rc;
	}

	if (!*ksocknal_tunables.ksnd_nagle) {
		option = 1;

		rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't disable nagle: %d\n", rc);
			return rc;
		}
	}

	rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
			      *ksocknal_tunables.ksnd_rx_buffer_size);
	if (rc) {
		CERROR("Can't set tx buffer %d, rx buffer %d: %d\n",
		       *ksocknal_tunables.ksnd_tx_buffer_size,
		       *ksocknal_tunables.ksnd_rx_buffer_size, rc);
		return rc;
	}

	/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */

	/* snapshot tunables */
	keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
	keep_count = *ksocknal_tunables.ksnd_keepalive_count;
	keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

	do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

	option = (do_keepalive ? 1 : 0);
	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
			       sizeof(option));
	if (rc) {
		CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
		return rc;
	}

	if (!do_keepalive)
		return 0;

	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
			       sizeof(keep_idle));
	if (rc) {
		CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
		return rc;
	}

	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
			       (char *)&keep_intvl, sizeof(keep_intvl));
	if (rc) {
		CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
		return rc;
	}

	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
			       sizeof(keep_count));
	if (rc) {
		CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
		return rc;
	}

	return 0;
}
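
/*
 * Aside (illustrative only): the keepalive triple set above maps
 * one-to-one onto the standard socket options.  The hypothetical
 * demo_setup_keepalive() below (not part of this LND, compiled out)
 * shows the user-space equivalent on a connected descriptor.
 */
#if 0	/* illustrative sketch, not built */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int demo_setup_keepalive(int fd, int idle, int intvl, int count)
{
	int on = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl,
		       sizeof(intvl)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count)))
		return -1;
	return 0;
}
#endif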
void
ksocknal_lib_push_conn(struct ksock_conn *conn)
{
	struct sock *sk;
	struct tcp_sock *tp;
	int nonagle;
	int val = 1;
	int rc;

	rc = ksocknal_connsock_addref(conn);
	if (rc)			    /* being shut down */
		return;

	sk = conn->ksnc_sock->sk;
	tp = tcp_sk(sk);

	lock_sock(sk);
	nonagle = tp->nonagle;
	tp->nonagle = 1;
	release_sock(sk);

	rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
			       (char *)&val, sizeof(val));
	LASSERT(!rc);

	lock_sock(sk);
	tp->nonagle = nonagle;
	release_sock(sk);

	ksocknal_connsock_decref(conn);
}
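
/*
 * Aside (illustrative only): the "push" is the classic TCP_NODELAY
 * toggle, which forces any Nagle-buffered partial segment onto the wire.
 * The hypothetical demo_push() below (not part of this LND, compiled
 * out) is the plain-socket version; unlike the code above it does not
 * bother restoring the previous Nagle state.
 */
#if 0	/* illustrative sketch, not built */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int demo_push(int fd)
{
	int on = 1;

	/* enabling TCP_NODELAY flushes whatever Nagle was holding back */
	return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
}
#endif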
/*
 * Socket callbacks (Linux)
 */
static void
ksocknal_data_ready(struct sock *sk)
{
	struct ksock_conn *conn;

	/* interleave correctly with closing sockets... */
	LASSERT(!in_irq());
	read_lock(&ksocknal_data.ksnd_global_lock);

	conn = sk->sk_user_data;
	if (!conn) { /* raced with ksocknal_terminate_conn */
		LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
		sk->sk_data_ready(sk);
	} else {
		ksocknal_read_callback(conn);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
static void
ksocknal_write_space(struct sock *sk)
{
	struct ksock_conn *conn;
	int wspace;
	int min_wspace;

	/* interleave correctly with closing sockets... */
	LASSERT(!in_irq());
	read_lock(&ksocknal_data.ksnd_global_lock);

	conn = sk->sk_user_data;
	wspace = sk_stream_wspace(sk);
	min_wspace = sk_stream_min_wspace(sk);

	CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
	       sk, wspace, min_wspace, conn,
	       !conn ? "" : (conn->ksnc_tx_ready ?
			     " ready" : " blocked"),
	       !conn ? "" : (conn->ksnc_tx_scheduled ?
			     " scheduled" : " idle"),
	       !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ?
			     " empty" : " queued"));

	if (!conn) { /* raced with ksocknal_terminate_conn */
		LASSERT(sk->sk_write_space != &ksocknal_write_space);
		sk->sk_write_space(sk);

		read_unlock(&ksocknal_data.ksnd_global_lock);
		return;
	}

	if (wspace >= min_wspace) { /* got enough space */
		ksocknal_write_callback(conn);

		/*
		 * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
		 * ENOMEM check in ksocknal_transmit is race-free (think about
		 * it).
		 */
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	}

	read_unlock(&ksocknal_data.ksnd_global_lock);
}
void
ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
{
	conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
	conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
{
	sock->sk->sk_user_data = conn;
	sock->sk->sk_data_ready = ksocknal_data_ready;
	sock->sk->sk_write_space = ksocknal_write_space;
}
void
ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
{
	/*
	 * Remove conn's network callbacks.
	 * NB I _have_ to restore the callbacks, rather than storing noops,
	 * since the socket could survive past this module being unloaded!
	 */
	sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
	sock->sk->sk_write_space = conn->ksnc_saved_write_space;

	/*
	 * A callback could be in progress already; it holds a read lock
	 * on ksnd_global_lock (to serialise with me) and NOOPs if
	 * sk_user_data is NULL.
	 */
	sock->sk->sk_user_data = NULL;
}
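
/*
 * Aside (illustrative only): save/set/reset above implement a standard
 * "hook and restore" pattern on function pointers.  The demo_* names
 * below are hypothetical and the sketch is compiled out.
 */
#if 0	/* illustrative sketch, not built */
typedef void (*demo_cb_t)(void *arg);

struct demo_hook {
	demo_cb_t saved;	/* original callback, restored on teardown */
};

static void demo_hook_set(struct demo_hook *h, demo_cb_t *slot,
			  demo_cb_t mine)
{
	h->saved = *slot;	/* ksocknal_lib_save_callback() */
	*slot = mine;		/* ksocknal_lib_set_callback() */
}

static void demo_hook_reset(struct demo_hook *h, demo_cb_t *slot)
{
	/*
	 * Restore the original rather than a noop, exactly because the
	 * hooked object may outlive the module that installed the hook.
	 */
	*slot = h->saved;	/* ksocknal_lib_reset_callback() */
}
#endif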
int
ksocknal_lib_memory_pressure(struct ksock_conn *conn)
{
	int rc = 0;
	struct ksock_sched *sched;

	sched = conn->ksnc_scheduler;
	spin_lock_bh(&sched->kss_lock);

	if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
	    !conn->ksnc_tx_ready) {
		/*
		 * SOCK_NOSPACE is set when the socket fills and is
		 * cleared in the write_space callback (which also sets
		 * ksnc_tx_ready).  If SOCK_NOSPACE and ksnc_tx_ready are
		 * BOTH zero, I didn't fill the socket and write_space
		 * won't reschedule me, so I return -ENOMEM to get my
		 * caller to retry when memory has become available.
		 */
		rc = -ENOMEM;
	}

	spin_unlock_bh(&sched->kss_lock);

	return rc;
}
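
/*
 * Aside (illustrative only): the decision above reduced to a pure
 * function of the two state bits.  demo_memory_pressure() is a
 * hypothetical name and the sketch is compiled out.
 */
#if 0	/* illustrative sketch, not built */
#include <errno.h>

/*
 * -ENOMEM only when the socket was NOT filled (no SOCK_NOSPACE) and
 * write_space has NOT signalled readiness: in that single state nobody
 * will reschedule the sender, so the caller must retry itself.
 */
static int demo_memory_pressure(int sock_nospace, int tx_ready)
{
	if (!sock_nospace && !tx_ready)
		return -ENOMEM;	/* truly out of memory */
	return 0;		/* write_space will reschedule the conn */
}
#endif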