Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[cascardo/linux.git] / net / ipv4 / tcp_ipv4.c
index 7158d4f..61b7be3 100644 (file)
@@ -86,7 +86,6 @@
 
 int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
 
 #ifdef CONFIG_TCP_MD5SIG
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1175,6 +1174,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                      NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                                     &iph->saddr, ntohs(th->source),
                                     &iph->daddr, ntohs(th->dest),
@@ -1195,7 +1195,6 @@ static void tcp_v4_init_req(struct request_sock *req,
 
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-       ireq->no_srccheck = inet_sk(sk_listener)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 }
 
@@ -1537,6 +1536,34 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_prequeue);
 
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+       u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; /* base backlog budget */
+
+       /* Only socket owner can try to collapse/prune rx queues
+        * to reduce memory overhead, so add a little headroom here.
+        * Few sockets backlog are possibly concurrently non empty.
+        */
+       limit += 64*1024;
+
+       /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+        * we can fix skb->truesize to its real value to avoid future drops.
+        * This is valid because skb is not yet charged to the socket.
+        * It has been noticed pure SACK packets were sometimes dropped
+        * (if cooked by drivers without copybreak feature).
+        */
+       if (!skb->data_len)
+               skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+       if (unlikely(sk_add_backlog(sk, skb, limit))) {
+               bh_unlock_sock(sk); /* drop path: release lock, bump MIB */
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+               return true; /* true: skb was NOT queued, caller must drop */
+       }
+       return false; /* false: skb queued to backlog, lock still held */
+}
+EXPORT_SYMBOL(tcp_add_backlog);
+
 /*
  *     From tcp_input.c
  */
@@ -1608,6 +1635,7 @@ process:
 
                sk = req->rsk_listener;
                if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+                       sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
                }
@@ -1666,10 +1694,7 @@ process:
        if (!sock_owned_by_user(sk)) {
                if (!tcp_prequeue(sk, skb))
                        ret = tcp_v4_do_rcv(sk, skb);
-       } else if (unlikely(sk_add_backlog(sk, skb,
-                                          sk->sk_rcvbuf + sk->sk_sndbuf))) {
-               bh_unlock_sock(sk);
-               __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
+       } else if (tcp_add_backlog(sk, skb)) {
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);
@@ -1818,7 +1843,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        tcp_write_queue_purge(sk);
 
        /* Cleans up our, hopefully empty, out_of_order_queue. */
-       __skb_queue_purge(&tp->out_of_order_queue);
+       skb_rbtree_purge(&tp->out_of_order_queue);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list, if any */
@@ -1845,9 +1870,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
        local_bh_disable();
        sk_sockets_allocated_dec(sk);
        local_bh_enable();
-
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg)
-               sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -1864,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        struct inet_listen_hashbucket *ilb;
-       struct inet_connection_sock *icsk;
        struct sock *sk = cur;
 
        if (!sk) {
@@ -1886,7 +1907,6 @@ get_sk:
                        continue;
                if (sk->sk_family == st->family)
                        return sk;
-               icsk = inet_csk(sk);
        }
        spin_unlock_bh(&ilb->lock);
        st->offset = 0;