/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 *              IPv4 specific functions
 *
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      request_sock handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan    :       Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 *                                      coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
                                          tcp_hdr(skb)->dest,
                                          tcp_hdr(skb)->source);
}

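/*
 * Background sketch (illustrative, not kernel code): secure_tcp_sequence_number()
 * follows the RFC 6528 scheme, ISN = M + F(saddr, daddr, sport, dport, secret),
 * where M is a fine-grained clock (RFC 793 suggested a ~4 microsecond tick)
 * and F is a keyed cryptographic hash. The userspace analogue below uses a
 * toy mixer in place of the real hash; names and constants are illustration
 * only.
 */
#if 0
#include <stdint.h>
#include <time.h>

static uint32_t toy_mix(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp,
                        uint32_t secret)
{
        /* placeholder mixer, NOT cryptographically secure */
        uint32_t h = s ^ (d * 2654435761u) ^
                     (((uint32_t)sp << 16) | dp) ^ secret;

        h ^= h >> 16;
        h *= 2246822519u;
        h ^= h >> 13;
        return h;
}

static uint32_t toy_isn(uint32_t saddr, uint32_t daddr,
                        uint16_t sport, uint16_t dport, uint32_t secret)
{
        struct timespec ts;
        uint32_t m;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        m = (uint32_t)((uint64_t)ts.tv_sec * 250000 + ts.tv_nsec / 4000);
        return m + toy_mix(saddr, daddr, sport, dport, secret);
}
#endif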
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap i.e. at data rates <= 80Mbit/sec.

           Actually, the idea is close to VJ's one, only timestamp cache is
           held not per host, but per port pair and TW bucket is used as state
           holder.

           If TW bucket has been already destroyed we fall back to VJ's scheme
           and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (sysctl_tcp_tw_reuse &&
                      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
                tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

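/*
 * Usage note (illustrative): sysctl_tcp_tw_reuse above corresponds to the
 * net.ipv4.tcp_tw_reuse knob. A minimal userspace sketch for flipping it,
 * assuming the usual procfs path and sufficient privileges:
 */
#if 0
#include <stdio.h>

static int set_tw_reuse(int on)
{
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

        if (!f)
                return -1;      /* typically needs root */
        fprintf(f, "%d\n", on);
        fclose(f);
        return 0;
}
#endif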
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __be16 orig_sport, orig_dport;
        __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             sock_owned_by_user(sk));
        if (inet_opt && inet_opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet_opt->opt.faddr;
        }

        orig_sport = inet->inet_sport;
        orig_dport = usin->sin_port;
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_TCP,
                              orig_sport, orig_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet_opt || !inet_opt->opt.srr)
                daddr = fl4->daddr;

        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
        sk_rcv_saddr_set(sk, inet->inet_saddr);

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
                        tp->write_seq      = 0;
        }

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
                tcp_fetch_timewait_stamp(sk, &rt->dst);

        inet->inet_dport = usin->sin_port;
        sk_daddr_set(sk, daddr);

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However we set the state to SYN-SENT and, without releasing the
         * socket lock, select a source port, enter ourselves into the hash
         * tables and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;

        sk_set_txhash(sk);

        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);

        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
                                                           inet->inet_daddr,
                                                           inet->inet_sport,
                                                           usin->sin_port);

        inet->inet_id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);

        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

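/*
 * Usage sketch (illustrative, userspace): a plain connect() on an IPv4
 * stream socket is what ends up in tcp_v4_connect() above. Address and
 * port are examples only.
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int dial(const char *ip, unsigned short port)
{
        struct sockaddr_in sin = { .sin_family = AF_INET };
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        sin.sin_port = htons(port);
        inet_pton(AF_INET, ip, &sin.sin_addr);
        if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* caller close()s */
}
#endif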
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);
        u32 mtu = tcp_sk(sk)->mtu_info;

        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;

        /* Something is about to go wrong... Remember the soft error
         * for the case where this connection cannot recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            ip_sk_accept_pmtu(sk) &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

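/*
 * Usage sketch (illustrative, userspace): once path MTU discovery has
 * updated the route, a connected socket can read the current path MTU
 * back via the IP_MTU socket option.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int path_mtu(int fd)
{
        int mtu;
        socklen_t len = sizeof(mtu);

        if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) < 0)
                return -1;      /* only valid on connected sockets */
        return mtu;
}
#endif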
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst)
                dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq)
{
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);

        /* ICMPs are not backlogged, hence we cannot get
         * an established socket here.
         */
        WARN_ON(req->sk);

        if (seq != tcp_rsk(req)->snt_isn) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
        }
        reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
        struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
        struct inet_connection_sock *icsk;
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(icmp_skb)->type;
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
        struct request_sock *fastopen;
        __u32 seq, snd_una;
        __u32 remaining;
        int err;
        struct net *net = dev_net(icmp_skb->dev);

        sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
                                       th->dest, iph->saddr, ntohs(th->source),
                                       inet_iif(icmp_skb));
        if (!sk) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
                return tcp_req_err(sk, seq);

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         * We do take care of PMTU discovery (RFC1191) special case :
         * we can receive locally generated ICMP messages while socket is held.
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
                        NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_REDIRECT:
                do_redirect(icmp_skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        /* We are not interested in TCP_LISTEN and open_requests
                         * (SYN-ACKs sent out by Linux are always < 576 bytes, so
                         * they should go through unfragmented).
                         */
                        if (sk->sk_state == TCP_LISTEN)
                                goto out;

                        tp->mtu_info = info;
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
                        } else {
                                if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
                                        sock_hold(sk);
                        }
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if icmp_skb allows revert of backoff
                 * (see draft-zimmermann-tcp-lcd) */
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una || !icsk->icsk_retransmits ||
                    !icsk->icsk_backoff || fastopen)
                        break;

                if (sock_owned_by_user(sk))
                        break;

                icsk->icsk_backoff--;
                icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
                                               TCP_TIMEOUT_INIT;
                icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

                skb = tcp_write_queue_head(sk);
                BUG_ON(!skb);

                remaining = icsk->icsk_rto -
                            min(icsk->icsk_rto,
                                tcp_time_stamp - tcp_skb_timestamp(skb));

                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  remaining, TCP_RTO_MAX);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
                        tcp_retransmit_timer(sk);
                }

                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
         * to be considered hard errors (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and broken firewalls sit in every dark corner sending random
         * errors ordered by their masters, even these two messages have lost
         * their original sense (even Linux sends invalid PORT_UNREACHs).
         *
         * Now we are in compliance with RFCs.
         *                                                      --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

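/*
 * Usage sketch (illustrative, userspace): the inet->recverr branch above is
 * driven by the IP_RECVERR socket option. With it enabled, ICMP-derived
 * errors can be drained from the socket's error queue:
 */
#if 0
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <sys/socket.h>

static void drain_errqueue(int fd)
{
        int on = 1;
        char cbuf[512];
        struct msghdr msg = { .msg_control = cbuf,
                              .msg_controllen = sizeof(cbuf) };
        struct cmsghdr *cm;

        setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

        while (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) >= 0) {
                for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                        if (cm->cmsg_level == IPPROTO_IP &&
                            cm->cmsg_type == IP_RECVERR) {
                                struct sock_extended_err *ee =
                                        (struct sock_extended_err *)CMSG_DATA(cm);
                                /* ee->ee_errno holds the converted ICMP error */
                                (void)ee;
                        }
                }
        }
}
#endif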
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(skb->len, saddr, daddr,
                                         csum_partial(th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

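/*
 * Illustrative sketch (not kernel code): the checksum above is the standard
 * ones' complement sum over the IPv4 pseudo-header (source address,
 * destination address, zero-padded protocol, TCP length) followed by the
 * TCP header and payload. A plain userspace version, assuming the addresses
 * are given in host byte order (e.g. after ntohl()) and the returned value
 * is stored into the header in network byte order:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp4_csum(uint32_t saddr_h, uint32_t daddr_h,
                          const uint8_t *seg, size_t len)
{
        uint64_t sum = 0;
        size_t i;

        /* pseudo-header: addresses, protocol (6 = TCP), TCP length */
        sum += (saddr_h >> 16) + (saddr_h & 0xffff);
        sum += (daddr_h >> 16) + (daddr_h & 0xffff);
        sum += 6;
        sum += len;

        /* TCP header + data, 16 bits at a time, network word order */
        for (i = 0; i + 1 < len; i += 2)
                sum += (seg[i] << 8) | seg[i + 1];
        if (len & 1)
                sum += seg[len - 1] << 8;

        /* fold carries and take the ones' complement */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}
#endif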
/*
 *      This routine will send an RST to the other tcp.
 *
 *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *                    for the reset?
 *      Answer: if a packet caused an RST, it is not for a socket
 *              existing in our system; if it is matched to a socket,
 *              it is just a duplicate segment or a bug in the other side's TCP.
 *              So we build the reply based only on the parameters that
 *              arrived with the segment.
 *      Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
                __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key = NULL;
        const __u8 *hash_location = NULL;
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        struct net *net;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        /* If sk not NULL, it means we did a successful lookup and incoming
         * route had to be correct. prequeue might have dropped our dst.
         */
        if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

        net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
        hash_location = tcp_parse_md5sig_option(th);
        if (sk && sk_fullsock(sk)) {
                key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
                                        &ip_hdr(skb)->saddr, AF_INET);
        } else if (hash_location) {
                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the md5 key through
                 * the listening socket. We do not lose security here:
                 * the incoming packet is checked against the md5 hash with the
                 * key we find; no RST is generated if the md5 hash doesn't match.
                 */
                sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
                                             ip_hdr(skb)->saddr,
                                             th->source, ip_hdr(skb)->daddr,
                                             ntohs(th->source), inet_iif(skb));
                /* don't send rst if it can't find key */
                if (!sk1)
                        return;
                rcu_read_lock();
                key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
                                        &ip_hdr(skb)->saddr, AF_INET);
                if (!key)
                        goto release_sk1;

                genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto release_sk1;
        }

        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

        /* When socket is gone, all binding information is lost.
         * routing might fail in this case. No choice here, if we choose to force
         * input interface, we will misroute in case of asymmetric route.
         */
        if (sk)
                arg.bound_dev_if = sk->sk_bound_dev_if;

        BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
                     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

        arg.tos = ip_hdr(skb)->tos;
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
        if (sk1) {
                rcu_read_unlock();
                sock_put(sk1);
        }
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
                            struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                        ];
        } rep;
        struct ip_reply_arg arg;

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (tsecr) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tsval);
                rep.opt[2] = htonl(tsecr);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (tsecr) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(sock_net(sk), skb,
                        tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        tw->tw_tos
                        );

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
                                                 tcp_sk(sk)->snd_nxt;

        tcp_v4_send_ack(sock_net(sk), skb, seq,
                        tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
}

/*
 *      Send a SYN-ACK after having received a SYN.
 *      This still operates on a request_sock only, not on a big
 *      socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              bool attach_req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, foc, attach_req);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }

        return err;
}

/*
 *      IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
                                         const union tcp_md5_addr *addr,
                                         int family)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        unsigned int size = sizeof(struct in_addr);
        const struct tcp_md5sig_info *md5sig;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       sock_owned_by_user(sk) ||
                                       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
        if (!md5sig)
                return NULL;
#if IS_ENABLED(CONFIG_IPV6)
        if (family == AF_INET6)
                size = sizeof(struct in6_addr);
#endif
        hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                if (key->family != family)
                        continue;
                if (!memcmp(&key->addr, addr, size))
                        return key;
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk)
{
        const union tcp_md5_addr *addr;

        addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
        /* Add Key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;

        key = tcp_md5_do_lookup(sk, addr, family);
        if (key) {
                /* Pre-existing entry - just update that one. */
                memcpy(key->key, newkey, newkeylen);
                key->keylen = newkeylen;
                return 0;
        }

        md5sig = rcu_dereference_protected(tp->md5sig_info,
                                           sock_owned_by_user(sk) ||
                                           lockdep_is_held(&sk->sk_lock.slock));
        if (!md5sig) {
                md5sig = kmalloc(sizeof(*md5sig), gfp);
                if (!md5sig)
                        return -ENOMEM;

                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                INIT_HLIST_HEAD(&md5sig->head);
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }

        key = sock_kmalloc(sk, sizeof(*key), gfp);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
                sock_kfree_s(sk, key, sizeof(*key));
                return -ENOMEM;
        }

        memcpy(key->key, newkey, newkeylen);
        key->keylen = newkeylen;
        key->family = family;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
        hlist_add_head_rcu(&key->node, &md5sig->head);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
        struct tcp_md5sig_key *key;

        key = tcp_md5_do_lookup(sk, addr, family);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
        atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
        kfree_rcu(key, rcu);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        struct hlist_node *n;
        struct tcp_md5sig_info *md5sig;

        md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

        hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
                hlist_del_rcu(&key->node);
                atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
                kfree_rcu(key, rcu);
        }
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin->sin_family != AF_INET)
                return -EINVAL;

        if (!cmd.tcpm_keylen)
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                                      AF_INET);

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                              AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
                              GFP_KERNEL);
}

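/*
 * Usage sketch (illustrative, userspace): tcp_v4_parse_md5_keys() above is
 * reached via setsockopt(TCP_MD5SIG). A key is installed for a specific
 * peer address before connect() or listen():
 */
#if 0
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer,
                       const char *key, int keylen)
{
        struct tcp_md5sig md5;

        memset(&md5, 0, sizeof(md5));
        memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
        md5.tcpm_keylen = keylen;       /* <= TCP_MD5SIG_MAXKEYLEN */
        memcpy(md5.tcpm_key, key, keylen);

        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif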
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
                                        __be32 daddr, __be32 saddr, int nbytes)
{
        struct tcp4_pseudohdr *bp;
        struct scatterlist sg;

        bp = &hp->md5_blk.ip4;

        /*
         * 1. the TCP pseudo-header (in the order: source IP address,
         * destination IP address, zero-padded protocol number, and
         * segment length)
         */
        bp->saddr = saddr;
        bp->daddr = daddr;
        bp->pad = 0;
        bp->protocol = IPPROTO_TCP;
        bp->len = cpu_to_be16(nbytes);

        sg_init_one(&sg, bp, sizeof(*bp));
        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sock *sk,
                        const struct sk_buff *skb)
{
        struct tcp_md5sig_pool *hp;
        struct hash_desc *desc;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;

        if (sk) { /* valid for establish/request sockets */
                saddr = sk->sk_rcv_saddr;
                daddr = sk->sk_daddr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
                daddr = iph->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        desc = &hp->md5_desc;

        if (crypto_hash_init(desc))
                goto clear_hash;

        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_header(hp, th))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        if (crypto_hash_final(desc, md5_hash))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
        /*
         * This gets called for each TCP segment that arrives
         * so we want to be efficient.
         * We have 3 drop cases:
         * o No MD5 hash and one expected.
         * o MD5 hash and we're not expecting one.
         * o MD5 hash and it's wrong.
         */
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        unsigned char newhash[16];

        hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
                                          AF_INET);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return false;

        if (hash_expected && !hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }

        /* Okay, so this is hash_expected and hash_location -
         * so we need to calculate the checksum.
         */
        genhash = tcp_v4_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                                     &iph->saddr, ntohs(th->source),
                                     &iph->daddr, ntohs(th->dest),
                                     genhash ? " tcp_v4_calc_md5_hash failed"
                                     : "");
                return true;
        }
        return false;
#endif
        return false;
}

static void tcp_v4_init_req(struct request_sock *req,
                            const struct sock *sk_listener,
                            struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);

        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
        ireq->no_srccheck = inet_sk(sk_listener)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
                                          struct flowi *fl,
                                          const struct request_sock *req,
                                          bool *strict)
{
        struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

        if (strict) {
                if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
                        *strict = true;
                else
                        *strict = false;
        }

        return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .mss_clamp      =       TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
        .req_md5_lookup =       tcp_v4_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
#endif
        .init_req       =       tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v4_init_sequence,
#endif
        .route_req      =       tcp_v4_route_req,
        .init_seq       =       tcp_v4_init_sequence,
        .send_synack    =       tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        /* Never answer SYNs sent to broadcast or multicast addresses */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        return tcp_conn_request(&tcp_request_sock_ops,
                                &tcp_request_sock_ipv4_ops, sk, skb);

drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

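/*
 * Usage note (illustrative): tcp_conn_request() above falls back to the
 * cookie_init_seq hook from tcp_request_sock_ipv4_ops when the SYN queue
 * overflows and SYN cookies are enabled. A minimal sketch for flipping the
 * knob, assuming the usual procfs path:
 */
#if 0
#include <stdio.h>

static int set_syncookies(int on)
{
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");

        if (!f)
                return -1;      /* typically needs root */
        fprintf(f, "%d\n", on);
        fclose(f);
        return 0;
}
#endif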

/*
 * The three-way handshake has completed - we received a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst,
                                  struct request_sock *req_unhash,
                                  bool *own_req)
{
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct ip_options_rcu *inet_opt;

        if (sk_acceptq_is_full(sk))
                goto exit_overflow;

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto exit_nonewsk;

        newsk->sk_gso_type = SKB_GSO_TCPV4;
        inet_sk_rx_dst_set(newsk, skb);

        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
        sk_daddr_set(newsk, ireq->ir_rmt_addr);
        sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
        newsk->sk_bound_dev_if = ireq->ir_iif;
        newinet->inet_saddr   = ireq->ir_loc_addr;
        inet_opt              = ireq->opt;
        rcu_assign_pointer(newinet->inet_opt, inet_opt);
        ireq->opt             = NULL;
        newinet->mc_index     = inet_iif(skb);
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;

        if (!dst) {
                dst = inet_csk_route_child_sock(sk, newsk, req);
                if (!dst)
                        goto put_and_exit;
        } else {
                /* syncookie case : see end of cookie_v4_check() */
        }
        sk_setup_caps(newsk, dst);

        tcp_ca_openreq_child(newsk, dst);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
        if (key) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
                               AF_INET, key->key, key->keylen, GFP_ATOMIC);
                sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
        if (*own_req)
                tcp_move_syn(newtp, req);

        return newsk;

exit_overflow:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
        dst_release(dst);
exit:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return NULL;
put_and_exit:
        inet_csk_prepare_forced_close(newsk);
        tcp_done(newsk);
        goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

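/*
 * Usage sketch (illustrative, userspace): the child socket created above is
 * what accept() eventually hands back once the handshake completes. Port
 * and backlog are examples only.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int serve_one(unsigned short port)
{
        struct sockaddr_in sin = { .sin_family = AF_INET };
        int lfd = socket(AF_INET, SOCK_STREAM, 0);

        if (lfd < 0)
                return -1;
        sin.sin_addr.s_addr = htonl(INADDR_ANY);
        sin.sin_port = htons(port);
        if (bind(lfd, (struct sockaddr *)&sin, sizeof(sin)) < 0 ||
            listen(lfd, 128) < 0)
                return -1;
        return accept(lfd, NULL, NULL); /* the freshly created child */
}
#endif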
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        const struct tcphdr *th = tcp_hdr(skb);

        /* Cookies are validated on the bare ACK that completes the
         * handshake, never on the SYN itself.
         */
        if (!th->syn)
                sk = cookie_v4_check(sk, skb);
#endif
        return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct sock *rsk;

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            !dst->ops->check(dst, 0)) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }
                tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
                return 0;
        }

        if (tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v4_cookie_check(sk, skb);

                if (!nsk)
                        goto discard;
                if (nsk != sk) {
                        sock_rps_save_rxhash(nsk, skb);
                        sk_mark_napi_id(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb)) {
                                rsk = nsk;
                                goto reset;
                        }
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

        if (tcp_rcv_state_process(sk, skb)) {
                rsk = sk;
                goto reset;
        }
        return 0;

reset:
        tcp_v4_send_reset(rsk, skb);
discard:
        kfree_skb(skb);
        /* Be careful here. If this function gets more complicated and
         * gcc suffers from register pressure on the x86, sk (in %ebx)
         * might be destroyed here. This current version compiles correctly,
         * but you have been warned.
         */
        return 0;

csum_err:
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

1437 void tcp_v4_early_demux(struct sk_buff *skb)
1438 {
1439         const struct iphdr *iph;
1440         const struct tcphdr *th;
1441         struct sock *sk;
1442
1443         if (skb->pkt_type != PACKET_HOST)
1444                 return;
1445
1446         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1447                 return;
1448
1449         iph = ip_hdr(skb);
1450         th = tcp_hdr(skb);
1451
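        /* th->doff counts 32-bit words of TCP header, so the smallest legal
         * value is 5 (a bare 20-byte header) and the largest is 15 (60
         * bytes).  Anything below 5 cannot even cover the fixed header.
         */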
1452         if (th->doff < sizeof(struct tcphdr) / 4)
1453                 return;
1454
1455         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1456                                        iph->saddr, th->source,
1457                                        iph->daddr, ntohs(th->dest),
1458                                        skb->skb_iif);
1459         if (sk) {
1460                 skb->sk = sk;
1461                 skb->destructor = sock_edemux;
1462                 if (sk_fullsock(sk)) {
1463                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1464
1465                         if (dst)
1466                                 dst = dst_check(dst, 0);
1467                         if (dst &&
1468                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1469                                 skb_dst_set_noref(skb, dst);
1470                 }
1471         }
1472 }
1473
1474 /* Packet is added to VJ-style prequeue for processing in process
1475  * context, if a reader task is waiting. Apparently, this exciting
1476  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1477  * failed somewhere. Latency? Burstiness? Well, at least now we will
1478  * see why it failed. 8)8)                               --ANK
1479  *
1480  */
1481 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1482 {
1483         struct tcp_sock *tp = tcp_sk(sk);
1484
1485         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1486                 return false;
1487
1488         if (skb->len <= tcp_hdrlen(skb) &&
1489             skb_queue_len(&tp->ucopy.prequeue) == 0)
1490                 return false;
1491
1492         /* Before escaping the RCU-protected region, we need to take care of the
1493          * skb dst.  Prequeue is only enabled for established sockets; for such
1494          * sockets, we might need the skb dst only to set sk->sk_rx_dst.
1495          * Instead of doing a full sk_rx_dst validity check here, let's perform
1496          * an optimistic one.
1497          */
1498         if (likely(sk->sk_rx_dst))
1499                 skb_dst_drop(skb);
1500         else
1501                 skb_dst_force_safe(skb);
1502
1503         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1504         tp->ucopy.memory += skb->truesize;
1505         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1506                 struct sk_buff *skb1;
1507
1508                 BUG_ON(sock_owned_by_user(sk));
1509
1510                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1511                         sk_backlog_rcv(sk, skb1);
1512                         NET_INC_STATS_BH(sock_net(sk),
1513                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1514                 }
1515
1516                 tp->ucopy.memory = 0;
1517         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1518                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1519                                            POLLIN | POLLRDNORM | POLLRDBAND);
1520                 if (!inet_csk_ack_scheduled(sk))
1521                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1522                                                   (3 * tcp_rto_min(sk)) / 4,
1523                                                   TCP_RTO_MAX);
1524         }
1525         return true;
1526 }
1527 EXPORT_SYMBOL(tcp_prequeue);
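
/*
 * A minimal userspace sketch for observing the knob consulted above:
 * setting net.ipv4.tcp_low_latency makes tcp_prequeue() bail out
 * immediately, trading the prequeue's batching for lower latency.
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_low_latency", "r");
	int val;

	if (f && fscanf(f, "%d", &val) == 1)
		printf("tcp_low_latency = %d (prequeue %s)\n",
		       val, val ? "bypassed" : "enabled");
	if (f)
		fclose(f);
	return f ? 0 : 1;
}
#endif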
1528
1529 /*
1530  *      From tcp_input.c
1531  */
1532
1533 int tcp_v4_rcv(struct sk_buff *skb)
1534 {
1535         const struct iphdr *iph;
1536         const struct tcphdr *th;
1537         struct sock *sk;
1538         int ret;
1539         struct net *net = dev_net(skb->dev);
1540
1541         if (skb->pkt_type != PACKET_HOST)
1542                 goto discard_it;
1543
1544         /* Count it even if it's bad */
1545         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1546
1547         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1548                 goto discard_it;
1549
1550         th = tcp_hdr(skb);
1551
1552         if (th->doff < sizeof(struct tcphdr) / 4)
1553                 goto bad_packet;
1554         if (!pskb_may_pull(skb, th->doff * 4))
1555                 goto discard_it;
1556
1557         /* An explanation is required here, I think.
1558          * Packet length and doff are validated by header prediction,
1559          * provided the case of th->doff==0 is eliminated.
1560          * So, we defer the checks. */
1561
1562         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1563                 goto csum_error;
1564
1565         th = tcp_hdr(skb);
1566         iph = ip_hdr(skb);
1567         /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1568          * barrier() makes sure the compiler won't play fool^Waliasing games.
1569          */
1570         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1571                 sizeof(struct inet_skb_parm));
1572         barrier();
1573
1574         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1575         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1576                                     skb->len - th->doff * 4);
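        /* Worked example: seq = 1000, FIN set, skb->len = 552, doff = 8
         * (32 header bytes) gives end_seq = 1000 + 0 + 1 + 552 - 32 = 1521;
         * SYN and FIN each consume one unit of sequence space, exactly
         * like a byte of payload.
         */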
1577         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1578         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1579         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1580         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1581         TCP_SKB_CB(skb)->sacked  = 0;
1582
1583 lookup:
1584         sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1585                                th->dest);
1586         if (!sk)
1587                 goto no_tcp_socket;
1588
1589 process:
1590         if (sk->sk_state == TCP_TIME_WAIT)
1591                 goto do_time_wait;
1592
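        /* A TCP_NEW_SYN_RECV "socket" is really a request_sock found in the
         * established hash: process it through its listener, or drop it and
         * retry the lookup if that listener has stopped listening.
         */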
1593         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1594                 struct request_sock *req = inet_reqsk(sk);
1595                 struct sock *nsk = NULL;
1596
1597                 sk = req->rsk_listener;
1598                 if (tcp_v4_inbound_md5_hash(sk, skb))
1599                         goto discard_and_relse;
1600                 if (likely(sk->sk_state == TCP_LISTEN)) {
1601                         nsk = tcp_check_req(sk, skb, req, false);
1602                 } else {
1603                         inet_csk_reqsk_queue_drop_and_put(sk, req);
1604                         goto lookup;
1605                 }
1606                 if (!nsk) {
1607                         reqsk_put(req);
1608                         goto discard_it;
1609                 }
1610                 if (nsk == sk) {
1611                         sock_hold(sk);
1612                         reqsk_put(req);
1613                 } else if (tcp_child_process(sk, nsk, skb)) {
1614                         tcp_v4_send_reset(nsk, skb);
1615                         goto discard_it;
1616                 } else {
1617                         return 0;
1618                 }
1619         }
1620         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1621                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1622                 goto discard_and_relse;
1623         }
1624
1625         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1626                 goto discard_and_relse;
1627
1628         if (tcp_v4_inbound_md5_hash(sk, skb))
1629                 goto discard_and_relse;
1630
1631         nf_reset(skb);
1632
1633         if (sk_filter(sk, skb))
1634                 goto discard_and_relse;
1635
1636         skb->dev = NULL;
1637
1638         if (sk->sk_state == TCP_LISTEN) {
1639                 ret = tcp_v4_do_rcv(sk, skb);
1640                 goto put_and_return;
1641         }
1642
1643         sk_incoming_cpu_update(sk);
1644
1645         bh_lock_sock_nested(sk);
1646         tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1647         ret = 0;
1648         if (!sock_owned_by_user(sk)) {
1649                 if (!tcp_prequeue(sk, skb))
1650                         ret = tcp_v4_do_rcv(sk, skb);
1651         } else if (unlikely(sk_add_backlog(sk, skb,
1652                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1653                 bh_unlock_sock(sk);
1654                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1655                 goto discard_and_relse;
1656         }
1657         bh_unlock_sock(sk);
1658
1659 put_and_return:
1660         sock_put(sk);
1661
1662         return ret;
1663
1664 no_tcp_socket:
1665         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1666                 goto discard_it;
1667
1668         if (tcp_checksum_complete(skb)) {
1669 csum_error:
1670                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1671 bad_packet:
1672                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1673         } else {
1674                 tcp_v4_send_reset(NULL, skb);
1675         }
1676
1677 discard_it:
1678         /* Discard frame. */
1679         kfree_skb(skb);
1680         return 0;
1681
1682 discard_and_relse:
1683         sock_put(sk);
1684         goto discard_it;
1685
1686 do_time_wait:
1687         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1688                 inet_twsk_put(inet_twsk(sk));
1689                 goto discard_it;
1690         }
1691
1692         if (tcp_checksum_complete(skb)) {
1693                 inet_twsk_put(inet_twsk(sk));
1694                 goto csum_error;
1695         }
1696         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
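        /* TCP_TW_SYN: the segment is a new SYN that is acceptable on this
         * 4-tuple (its sequence number is beyond what the old connection
         * used), so hand it to a listener and recycle the TIME_WAIT entry.
         */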
1697         case TCP_TW_SYN: {
1698                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1699                                                         &tcp_hashinfo, skb,
1700                                                         __tcp_hdrlen(th),
1701                                                         iph->saddr, th->source,
1702                                                         iph->daddr, th->dest,
1703                                                         inet_iif(skb));
1704                 if (sk2) {
1705                         inet_twsk_deschedule_put(inet_twsk(sk));
1706                         sk = sk2;
1707                         goto process;
1708                 }
1709                 /* Fall through to ACK */
1710         }
1711         case TCP_TW_ACK:
1712                 tcp_v4_timewait_ack(sk, skb);
1713                 break;
1714         case TCP_TW_RST:
1715                 tcp_v4_send_reset(sk, skb);
1716                 inet_twsk_deschedule_put(inet_twsk(sk));
1717                 goto discard_it;
1718         case TCP_TW_SUCCESS:;
1719         }
1720         goto discard_it;
1721 }
1722
1723 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1724         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1725         .twsk_unique    = tcp_twsk_unique,
1726         .twsk_destructor= tcp_twsk_destructor,
1727 };
1728
1729 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1730 {
1731         struct dst_entry *dst = skb_dst(skb);
1732
1733         if (dst && dst_hold_safe(dst)) {
1734                 sk->sk_rx_dst = dst;
1735                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1736         }
1737 }
1738 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1739
1740 const struct inet_connection_sock_af_ops ipv4_specific = {
1741         .queue_xmit        = ip_queue_xmit,
1742         .send_check        = tcp_v4_send_check,
1743         .rebuild_header    = inet_sk_rebuild_header,
1744         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1745         .conn_request      = tcp_v4_conn_request,
1746         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1747         .net_header_len    = sizeof(struct iphdr),
1748         .setsockopt        = ip_setsockopt,
1749         .getsockopt        = ip_getsockopt,
1750         .addr2sockaddr     = inet_csk_addr2sockaddr,
1751         .sockaddr_len      = sizeof(struct sockaddr_in),
1752         .bind_conflict     = inet_csk_bind_conflict,
1753 #ifdef CONFIG_COMPAT
1754         .compat_setsockopt = compat_ip_setsockopt,
1755         .compat_getsockopt = compat_ip_getsockopt,
1756 #endif
1757         .mtu_reduced       = tcp_v4_mtu_reduced,
1758 };
1759 EXPORT_SYMBOL(ipv4_specific);
1760
1761 #ifdef CONFIG_TCP_MD5SIG
1762 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1763         .md5_lookup             = tcp_v4_md5_lookup,
1764         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1765         .md5_parse              = tcp_v4_parse_md5_keys,
1766 };
1767 #endif
1768
1769 /* NOTE: A lot of things are set to zero explicitly by the call to
1770  *       sk_alloc(), so they need not be done here.
1771  */
1772 static int tcp_v4_init_sock(struct sock *sk)
1773 {
1774         struct inet_connection_sock *icsk = inet_csk(sk);
1775
1776         tcp_init_sock(sk);
1777
1778         icsk->icsk_af_ops = &ipv4_specific;
1779
1780 #ifdef CONFIG_TCP_MD5SIG
1781         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1782 #endif
1783
1784         return 0;
1785 }
1786
1787 void tcp_v4_destroy_sock(struct sock *sk)
1788 {
1789         struct tcp_sock *tp = tcp_sk(sk);
1790
1791         tcp_clear_xmit_timers(sk);
1792
1793         tcp_cleanup_congestion_control(sk);
1794
1795         /* Clean up the write buffer. */
1796         tcp_write_queue_purge(sk);
1797
1798         /* Cleans up our, hopefully empty, out_of_order_queue. */
1799         __skb_queue_purge(&tp->out_of_order_queue);
1800
1801 #ifdef CONFIG_TCP_MD5SIG
1802         /* Clean up the MD5 key list, if any */
1803         if (tp->md5sig_info) {
1804                 tcp_clear_md5_list(sk);
1805                 kfree_rcu(tp->md5sig_info, rcu);
1806                 tp->md5sig_info = NULL;
1807         }
1808 #endif
1809
1810         /* Clean up the prequeue; it really must be empty by now */
1811         __skb_queue_purge(&tp->ucopy.prequeue);
1812
1813         /* Clean up a referenced TCP bind bucket. */
1814         if (inet_csk(sk)->icsk_bind_hash)
1815                 inet_put_port(sk);
1816
1817         BUG_ON(tp->fastopen_rsk);
1818
1819         /* If socket is aborted during connect operation */
1820         tcp_free_fastopen_req(tp);
1821         tcp_saved_syn_free(tp);
1822
1823         sk_sockets_allocated_dec(sk);
1824
1825         if (mem_cgroup_sockets_enabled && sk->sk_memcg)
1826                 sock_release_memcg(sk);
1827 }
1828 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1829
1830 #ifdef CONFIG_PROC_FS
1831 /* Proc filesystem TCP sock list dumping. */
1832
1833 /*
1834  * Get the next listener socket following cur.  If cur is NULL, get the first
1835  * socket, starting from the bucket given in st->bucket; when st->bucket is zero the
1836  * very first socket in the hash table is returned.
1837  */
1838 static void *listening_get_next(struct seq_file *seq, void *cur)
1839 {
1841         struct hlist_nulls_node *node;
1842         struct sock *sk = cur;
1843         struct inet_listen_hashbucket *ilb;
1844         struct tcp_iter_state *st = seq->private;
1845         struct net *net = seq_file_net(seq);
1846
1847         if (!sk) {
1848                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1849                 spin_lock_bh(&ilb->lock);
1850                 sk = sk_nulls_head(&ilb->head);
1851                 st->offset = 0;
1852                 goto get_sk;
1853         }
1854         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1855         ++st->num;
1856         ++st->offset;
1857
1858         sk = sk_nulls_next(sk);
1859 get_sk:
1860         sk_nulls_for_each_from(sk, node) {
1861                 if (!net_eq(sock_net(sk), net))
1862                         continue;
1863                 if (sk->sk_family == st->family) {
1864                         cur = sk;
1865                         goto out;
1866                 }
1868         }
1869         spin_unlock_bh(&ilb->lock);
1870         st->offset = 0;
1871         if (++st->bucket < INET_LHTABLE_SIZE) {
1872                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1873                 spin_lock_bh(&ilb->lock);
1874                 sk = sk_nulls_head(&ilb->head);
1875                 goto get_sk;
1876         }
1877         cur = NULL;
1878 out:
1879         return cur;
1880 }
1881
1882 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1883 {
1884         struct tcp_iter_state *st = seq->private;
1885         void *rc;
1886
1887         st->bucket = 0;
1888         st->offset = 0;
1889         rc = listening_get_next(seq, NULL);
1890
1891         while (rc && *pos) {
1892                 rc = listening_get_next(seq, rc);
1893                 --*pos;
1894         }
1895         return rc;
1896 }
1897
1898 static inline bool empty_bucket(const struct tcp_iter_state *st)
1899 {
1900         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1901 }
1902
1903 /*
1904  * Get the first established socket, starting from the bucket given in st->bucket.
1905  * If st->bucket is zero, the very first socket in the hash is returned.
1906  */
1907 static void *established_get_first(struct seq_file *seq)
1908 {
1909         struct tcp_iter_state *st = seq->private;
1910         struct net *net = seq_file_net(seq);
1911         void *rc = NULL;
1912
1913         st->offset = 0;
1914         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1915                 struct sock *sk;
1916                 struct hlist_nulls_node *node;
1917                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1918
1919                 /* Lockless fast path for the common case of empty buckets */
1920                 if (empty_bucket(st))
1921                         continue;
1922
1923                 spin_lock_bh(lock);
1924                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1925                         if (sk->sk_family != st->family ||
1926                             !net_eq(sock_net(sk), net)) {
1927                                 continue;
1928                         }
1929                         rc = sk;
1930                         goto out;
1931                 }
1932                 spin_unlock_bh(lock);
1933         }
1934 out:
1935         return rc;
1936 }
1937
1938 static void *established_get_next(struct seq_file *seq, void *cur)
1939 {
1940         struct sock *sk = cur;
1941         struct hlist_nulls_node *node;
1942         struct tcp_iter_state *st = seq->private;
1943         struct net *net = seq_file_net(seq);
1944
1945         ++st->num;
1946         ++st->offset;
1947
1948         sk = sk_nulls_next(sk);
1949
1950         sk_nulls_for_each_from(sk, node) {
1951                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1952                         return sk;
1953         }
1954
1955         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1956         ++st->bucket;
1957         return established_get_first(seq);
1958 }
1959
1960 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1961 {
1962         struct tcp_iter_state *st = seq->private;
1963         void *rc;
1964
1965         st->bucket = 0;
1966         rc = established_get_first(seq);
1967
1968         while (rc && pos) {
1969                 rc = established_get_next(seq, rc);
1970                 --pos;
1971         }
1972         return rc;
1973 }
1974
1975 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1976 {
1977         void *rc;
1978         struct tcp_iter_state *st = seq->private;
1979
1980         st->state = TCP_SEQ_STATE_LISTENING;
1981         rc        = listening_get_idx(seq, &pos);
1982
1983         if (!rc) {
1984                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1985                 rc        = established_get_idx(seq, pos);
1986         }
1987
1988         return rc;
1989 }
1990
1991 static void *tcp_seek_last_pos(struct seq_file *seq)
1992 {
1993         struct tcp_iter_state *st = seq->private;
1994         int offset = st->offset;
1995         int orig_num = st->num;
1996         void *rc = NULL;
1997
1998         switch (st->state) {
1999         case TCP_SEQ_STATE_LISTENING:
2000                 if (st->bucket >= INET_LHTABLE_SIZE)
2001                         break;
2002                 st->state = TCP_SEQ_STATE_LISTENING;
2003                 rc = listening_get_next(seq, NULL);
2004                 while (offset-- && rc)
2005                         rc = listening_get_next(seq, rc);
2006                 if (rc)
2007                         break;
2008                 st->bucket = 0;
2009                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2010                 /* Fallthrough */
2011         case TCP_SEQ_STATE_ESTABLISHED:
2012                 if (st->bucket > tcp_hashinfo.ehash_mask)
2013                         break;
2014                 rc = established_get_first(seq);
2015                 while (offset-- && rc)
2016                         rc = established_get_next(seq, rc);
2017         }
2018
2019         st->num = orig_num;
2020
2021         return rc;
2022 }
2023
2024 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2025 {
2026         struct tcp_iter_state *st = seq->private;
2027         void *rc;
2028
2029         if (*pos && *pos == st->last_pos) {
2030                 rc = tcp_seek_last_pos(seq);
2031                 if (rc)
2032                         goto out;
2033         }
2034
2035         st->state = TCP_SEQ_STATE_LISTENING;
2036         st->num = 0;
2037         st->bucket = 0;
2038         st->offset = 0;
2039         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2040
2041 out:
2042         st->last_pos = *pos;
2043         return rc;
2044 }
2045
2046 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2047 {
2048         struct tcp_iter_state *st = seq->private;
2049         void *rc = NULL;
2050
2051         if (v == SEQ_START_TOKEN) {
2052                 rc = tcp_get_idx(seq, 0);
2053                 goto out;
2054         }
2055
2056         switch (st->state) {
2057         case TCP_SEQ_STATE_LISTENING:
2058                 rc = listening_get_next(seq, v);
2059                 if (!rc) {
2060                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2061                         st->bucket = 0;
2062                         st->offset = 0;
2063                         rc        = established_get_first(seq);
2064                 }
2065                 break;
2066         case TCP_SEQ_STATE_ESTABLISHED:
2067                 rc = established_get_next(seq, v);
2068                 break;
2069         }
2070 out:
2071         ++*pos;
2072         st->last_pos = *pos;
2073         return rc;
2074 }
2075
2076 static void tcp_seq_stop(struct seq_file *seq, void *v)
2077 {
2078         struct tcp_iter_state *st = seq->private;
2079
2080         switch (st->state) {
2081         case TCP_SEQ_STATE_LISTENING:
2082                 if (v != SEQ_START_TOKEN)
2083                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2084                 break;
2085         case TCP_SEQ_STATE_ESTABLISHED:
2086                 if (v)
2087                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2088                 break;
2089         }
2090 }
2091
2092 int tcp_seq_open(struct inode *inode, struct file *file)
2093 {
2094         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2095         struct tcp_iter_state *s;
2096         int err;
2097
2098         err = seq_open_net(inode, file, &afinfo->seq_ops,
2099                           sizeof(struct tcp_iter_state));
2100         if (err < 0)
2101                 return err;
2102
2103         s = ((struct seq_file *)file->private_data)->private;
2104         s->family               = afinfo->family;
2105         s->last_pos             = 0;
2106         return 0;
2107 }
2108 EXPORT_SYMBOL(tcp_seq_open);
2109
2110 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2111 {
2112         int rc = 0;
2113         struct proc_dir_entry *p;
2114
2115         afinfo->seq_ops.start           = tcp_seq_start;
2116         afinfo->seq_ops.next            = tcp_seq_next;
2117         afinfo->seq_ops.stop            = tcp_seq_stop;
2118
2119         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2120                              afinfo->seq_fops, afinfo);
2121         if (!p)
2122                 rc = -ENOMEM;
2123         return rc;
2124 }
2125 EXPORT_SYMBOL(tcp_proc_register);
2126
2127 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2128 {
2129         remove_proc_entry(afinfo->name, net->proc_net);
2130 }
2131 EXPORT_SYMBOL(tcp_proc_unregister);
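
/*
 * The iterators above follow the generic seq_file contract.  A minimal
 * sketch of that contract over a static array is shown below (the demo_*
 * names are invented for illustration): start() positions the cursor,
 * next() advances it, stop() undoes whatever locking start() took, and
 * show() formats one record, exactly as tcp_seq_start/next/stop and
 * tcp4_seq_show do while walking the listening and established hashes.
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static const int demo_vals[] = { 1, 2, 3 };

static void *demo_start(struct seq_file *s, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo_vals) ? (void *)&demo_vals[*pos] : NULL;
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
	++*pos;
	return demo_start(s, pos);
}

static void demo_stop(struct seq_file *s, void *v)
{
	/* Nothing to unlock for a static array. */
}

static int demo_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(const int *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}
#endif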
2132
2133 static void get_openreq4(const struct request_sock *req,
2134                          struct seq_file *f, int i)
2135 {
2136         const struct inet_request_sock *ireq = inet_rsk(req);
2137         long delta = req->rsk_timer.expires - jiffies;
2138
2139         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2140                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2141                 i,
2142                 ireq->ir_loc_addr,
2143                 ireq->ir_num,
2144                 ireq->ir_rmt_addr,
2145                 ntohs(ireq->ir_rmt_port),
2146                 TCP_SYN_RECV,
2147                 0, 0, /* could print option size, but that is af dependent. */
2148                 1,    /* timers active (only the expire timer) */
2149                 jiffies_delta_to_clock_t(delta),
2150                 req->num_timeout,
2151                 from_kuid_munged(seq_user_ns(f),
2152                                  sock_i_uid(req->rsk_listener)),
2153                 0,  /* non-standard timer */
2154                 0, /* open_requests have no inode */
2155                 0,
2156                 req);
2157 }
2158
2159 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2160 {
2161         int timer_active;
2162         unsigned long timer_expires;
2163         const struct tcp_sock *tp = tcp_sk(sk);
2164         const struct inet_connection_sock *icsk = inet_csk(sk);
2165         const struct inet_sock *inet = inet_sk(sk);
2166         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2167         __be32 dest = inet->inet_daddr;
2168         __be32 src = inet->inet_rcv_saddr;
2169         __u16 destp = ntohs(inet->inet_dport);
2170         __u16 srcp = ntohs(inet->inet_sport);
2171         int rx_queue;
2172         int state;
2173
2174         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2175             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2176             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2177                 timer_active    = 1;
2178                 timer_expires   = icsk->icsk_timeout;
2179         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2180                 timer_active    = 4;
2181                 timer_expires   = icsk->icsk_timeout;
2182         } else if (timer_pending(&sk->sk_timer)) {
2183                 timer_active    = 2;
2184                 timer_expires   = sk->sk_timer.expires;
2185         } else {
2186                 timer_active    = 0;
2187                 timer_expires = jiffies;
2188         }
2189
2190         state = sk_state_load(sk);
2191         if (state == TCP_LISTEN)
2192                 rx_queue = sk->sk_ack_backlog;
2193         else
2194                 /* Because we don't lock the socket,
2195                  * we might find a transient negative value.
2196                  */
2197                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2198
2199         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2200                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2201                 i, src, srcp, dest, destp, state,
2202                 tp->write_seq - tp->snd_una,
2203                 rx_queue,
2204                 timer_active,
2205                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2206                 icsk->icsk_retransmits,
2207                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2208                 icsk->icsk_probes_out,
2209                 sock_i_ino(sk),
2210                 atomic_read(&sk->sk_refcnt), sk,
2211                 jiffies_to_clock_t(icsk->icsk_rto),
2212                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2213                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2214                 tp->snd_cwnd,
2215                 state == TCP_LISTEN ?
2216                     fastopenq->max_qlen :
2217                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2218 }
2219
2220 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2221                                struct seq_file *f, int i)
2222 {
2223         long delta = tw->tw_timer.expires - jiffies;
2224         __be32 dest, src;
2225         __u16 destp, srcp;
2226
2227         dest  = tw->tw_daddr;
2228         src   = tw->tw_rcv_saddr;
2229         destp = ntohs(tw->tw_dport);
2230         srcp  = ntohs(tw->tw_sport);
2231
2232         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2233                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2234                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2235                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2236                 atomic_read(&tw->tw_refcnt), tw);
2237 }
2238
2239 #define TMPSZ 150
2240
2241 static int tcp4_seq_show(struct seq_file *seq, void *v)
2242 {
2243         struct tcp_iter_state *st;
2244         struct sock *sk = v;
2245
2246         seq_setwidth(seq, TMPSZ - 1);
2247         if (v == SEQ_START_TOKEN) {
2248                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2249                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2250                            "inode");
2251                 goto out;
2252         }
2253         st = seq->private;
2254
2255         if (sk->sk_state == TCP_TIME_WAIT)
2256                 get_timewait4_sock(v, seq, st->num);
2257         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2258                 get_openreq4(v, seq, st->num);
2259         else
2260                 get_tcp4_sock(v, seq, st->num);
2261 out:
2262         seq_pad(seq, '\n');
2263         return 0;
2264 }
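
/*
 * A minimal userspace sketch that consumes the format emitted above: each
 * /proc/net/tcp row prints addresses as "%08X:%04X", i.e. the raw
 * __be32/port in hex.  On a little-endian host the address hex string
 * therefore reads byte-reversed; feeding the parsed value straight back
 * into s_addr restores the right wire order on the same machine.
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	char line[256];
	unsigned int laddr, lport, raddr, rport, state;
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);	/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
			   &laddr, &lport, &raddr, &rport, &state) == 5) {
			struct in_addr a = { .s_addr = laddr };

			printf("%s:%u state %02X\n",
			       inet_ntoa(a), lport, state);
		}
	}
	fclose(f);
	return 0;
}
#endif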
2265
2266 static const struct file_operations tcp_afinfo_seq_fops = {
2267         .owner   = THIS_MODULE,
2268         .open    = tcp_seq_open,
2269         .read    = seq_read,
2270         .llseek  = seq_lseek,
2271         .release = seq_release_net
2272 };
2273
2274 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2275         .name           = "tcp",
2276         .family         = AF_INET,
2277         .seq_fops       = &tcp_afinfo_seq_fops,
2278         .seq_ops        = {
2279                 .show           = tcp4_seq_show,
2280         },
2281 };
2282
2283 static int __net_init tcp4_proc_init_net(struct net *net)
2284 {
2285         return tcp_proc_register(net, &tcp4_seq_afinfo);
2286 }
2287
2288 static void __net_exit tcp4_proc_exit_net(struct net *net)
2289 {
2290         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2291 }
2292
2293 static struct pernet_operations tcp4_net_ops = {
2294         .init = tcp4_proc_init_net,
2295         .exit = tcp4_proc_exit_net,
2296 };
2297
2298 int __init tcp4_proc_init(void)
2299 {
2300         return register_pernet_subsys(&tcp4_net_ops);
2301 }
2302
2303 void tcp4_proc_exit(void)
2304 {
2305         unregister_pernet_subsys(&tcp4_net_ops);
2306 }
2307 #endif /* CONFIG_PROC_FS */
2308
2309 struct proto tcp_prot = {
2310         .name                   = "TCP",
2311         .owner                  = THIS_MODULE,
2312         .close                  = tcp_close,
2313         .connect                = tcp_v4_connect,
2314         .disconnect             = tcp_disconnect,
2315         .accept                 = inet_csk_accept,
2316         .ioctl                  = tcp_ioctl,
2317         .init                   = tcp_v4_init_sock,
2318         .destroy                = tcp_v4_destroy_sock,
2319         .shutdown               = tcp_shutdown,
2320         .setsockopt             = tcp_setsockopt,
2321         .getsockopt             = tcp_getsockopt,
2322         .recvmsg                = tcp_recvmsg,
2323         .sendmsg                = tcp_sendmsg,
2324         .sendpage               = tcp_sendpage,
2325         .backlog_rcv            = tcp_v4_do_rcv,
2326         .release_cb             = tcp_release_cb,
2327         .hash                   = inet_hash,
2328         .unhash                 = inet_unhash,
2329         .get_port               = inet_csk_get_port,
2330         .enter_memory_pressure  = tcp_enter_memory_pressure,
2331         .stream_memory_free     = tcp_stream_memory_free,
2332         .sockets_allocated      = &tcp_sockets_allocated,
2333         .orphan_count           = &tcp_orphan_count,
2334         .memory_allocated       = &tcp_memory_allocated,
2335         .memory_pressure        = &tcp_memory_pressure,
2336         .sysctl_mem             = sysctl_tcp_mem,
2337         .sysctl_wmem            = sysctl_tcp_wmem,
2338         .sysctl_rmem            = sysctl_tcp_rmem,
2339         .max_header             = MAX_TCP_HEADER,
2340         .obj_size               = sizeof(struct tcp_sock),
2341         .slab_flags             = SLAB_DESTROY_BY_RCU,
2342         .twsk_prot              = &tcp_timewait_sock_ops,
2343         .rsk_prot               = &tcp_request_sock_ops,
2344         .h.hashinfo             = &tcp_hashinfo,
2345         .no_autobind            = true,
2346 #ifdef CONFIG_COMPAT
2347         .compat_setsockopt      = compat_tcp_setsockopt,
2348         .compat_getsockopt      = compat_tcp_getsockopt,
2349 #endif
2350         .diag_destroy           = tcp_abort,
2351 };
2352 EXPORT_SYMBOL(tcp_prot);
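
/*
 * Each entry in tcp_prot is reached through an ordinary socket call.  The
 * userspace sketch below (address and port are arbitrary) maps the
 * syscalls to the callbacks above: socket() ends up in .init
 * (tcp_v4_init_sock), connect() in .connect (tcp_v4_connect), send() in
 * .sendmsg (tcp_sendmsg) and close() in .close (tcp_close).
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0)
		(void)send(fd, "ping", 4, 0);
	close(fd);
	return 0;
}
#endif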
2353
2354 static void __net_exit tcp_sk_exit(struct net *net)
2355 {
2356         int cpu;
2357
2358         for_each_possible_cpu(cpu)
2359                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2360         free_percpu(net->ipv4.tcp_sk);
2361 }
2362
2363 static int __net_init tcp_sk_init(struct net *net)
2364 {
2365         int res, cpu;
2366
2367         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2368         if (!net->ipv4.tcp_sk)
2369                 return -ENOMEM;
2370
2371         for_each_possible_cpu(cpu) {
2372                 struct sock *sk;
2373
2374                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2375                                            IPPROTO_TCP, net);
2376                 if (res)
2377                         goto fail;
2378                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2379         }
2380
2381         net->ipv4.sysctl_tcp_ecn = 2;
2382         net->ipv4.sysctl_tcp_ecn_fallback = 1;
2383
2384         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2385         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2386         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2387
2388         net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2389         net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2390         net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2391
2392         net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2393         net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2394         net->ipv4.sysctl_tcp_syncookies = 1;
2395         net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2396         net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2397         net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2398         net->ipv4.sysctl_tcp_orphan_retries = 0;
2399         net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2400         net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2401
2402         net->ipv4.sysctl_igmp_max_memberships = 20;
2403         net->ipv4.sysctl_igmp_max_msf = 10;
2404         /* IGMP reports for link-local multicast groups are enabled by default */
2405         net->ipv4.sysctl_igmp_llm_reports = 1;
2406         net->ipv4.sysctl_igmp_qrv = 2;
2407
2408         return 0;
2409 fail:
2410         tcp_sk_exit(net);
2411
2412         return res;
2413 }
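
/*
 * The values installed above are per network namespace.  A sketch of
 * reading a few of them back from within a namespace (run it under
 * something like "ip netns exec <ns> ..." to see a freshly initialized
 * set, e.g. tcp_ecn = 2, meaning ECN is enabled only on request):
 */
#if 0	/* illustrative sketch, not part of the kernel build */
#include <stdio.h>

int main(void)
{
	static const char * const keys[] = {
		"/proc/sys/net/ipv4/tcp_ecn",
		"/proc/sys/net/ipv4/tcp_syncookies",
		"/proc/sys/net/ipv4/tcp_fin_timeout",
	};
	unsigned int i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		FILE *f = fopen(keys[i], "r");
		int val;

		if (f && fscanf(f, "%d", &val) == 1)
			printf("%s = %d\n", keys[i], val);
		if (f)
			fclose(f);
	}
	return 0;
}
#endif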
2414
2415 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2416 {
2417         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2418 }
2419
2420 static struct pernet_operations __net_initdata tcp_sk_ops = {
2421        .init       = tcp_sk_init,
2422        .exit       = tcp_sk_exit,
2423        .exit_batch = tcp_sk_exit_batch,
2424 };
2425
2426 void __init tcp_v4_init(void)
2427 {
2428         inet_hashinfo_init(&tcp_hashinfo);
2429         if (register_pernet_subsys(&tcp_sk_ops))
2430                 panic("Failed to create the TCP control socket.\n");
2431 }