Merge tag 'pci-v4.9-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
[cascardo/linux.git] / net / ipv4 / af_inet.c
index 55513e6..1effc98 100644 (file)
@@ -211,24 +211,19 @@ int inet_listen(struct socket *sock, int backlog)
         * we can only allow the backlog to be adjusted.
         */
        if (old_state != TCP_LISTEN) {
-               /* Check special setups for testing purpose to enable TFO w/o
-                * requiring TCP_FASTOPEN sockopt.
+               /* Enable TFO w/o requiring TCP_FASTOPEN socket option.
                 * Note that only TCP sockets (SOCK_STREAM) will reach here.
-                * Also fastopenq may already been allocated because this
-                * socket was in TCP_LISTEN state previously but was
-                * shutdown() (rather than close()).
+                * Also the fastopen backlog may already have been set via the
+                * option because the socket was in TCP_LISTEN state previously
+                * but was shutdown() rather than close().
                 */
-               if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
+               if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
+                   (sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
                    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
-                       if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
-                               fastopen_queue_tune(sk, backlog);
-                       else if ((sysctl_tcp_fastopen &
-                                 TFO_SERVER_WO_SOCKOPT2) != 0)
-                               fastopen_queue_tune(sk,
-                                   ((uint)sysctl_tcp_fastopen) >> 16);
-
+                       fastopen_queue_tune(sk, backlog);
                        tcp_fastopen_init_key_once(true);
                }
+
                err = inet_csk_listen_start(sk, backlog);
                if (err)
                        goto out;
@@ -921,6 +916,8 @@ const struct proto_ops inet_stream_ops = {
        .mmap              = sock_no_mmap,
        .sendpage          = inet_sendpage,
        .splice_read       = tcp_splice_read,
+       .read_sock         = tcp_read_sock,
+       .peek_len          = tcp_peek_len,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
@@ -1195,7 +1192,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                 netdev_features_t features)
 {
-       bool udpfrag = false, fixedid = false, encap;
+       bool udpfrag = false, fixedid = false, gso_partial, encap;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        unsigned int offset = 0;
@@ -1248,6 +1245,8 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        if (IS_ERR_OR_NULL(segs))
                goto out;
 
+       gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
        skb = segs;
        do {
                iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
@@ -1262,9 +1261,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                iph->id = htons(id);
                                id += skb_shinfo(skb)->gso_segs;
                        }
-                       tot_len = skb_shinfo(skb)->gso_size +
-                                 SKB_GSO_CB(skb)->data_offset +
-                                 skb->head - (unsigned char *)iph;
+
+                       if (gso_partial)
+                               tot_len = skb_shinfo(skb)->gso_size +
+                                         SKB_GSO_CB(skb)->data_offset +
+                                         skb->head - (unsigned char *)iph;
+                       else
+                               tot_len = skb->len - nhoff;
                } else {
                        if (!fixedid)
                                iph->id = htons(id++);