[cascardo/linux.git] net/netfilter/nf_conntrack_core.c @ 62c42e970c893488ff693f2d828bb6ae115d1cc1
1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/types.h>
18 #include <linux/netfilter.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/skbuff.h>
22 #include <linux/proc_fs.h>
23 #include <linux/vmalloc.h>
24 #include <linux/stddef.h>
25 #include <linux/slab.h>
26 #include <linux/random.h>
27 #include <linux/jhash.h>
28 #include <linux/err.h>
29 #include <linux/percpu.h>
30 #include <linux/moduleparam.h>
31 #include <linux/notifier.h>
32 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/socket.h>
35 #include <linux/mm.h>
36 #include <linux/nsproxy.h>
37 #include <linux/rculist_nulls.h>
38
39 #include <net/netfilter/nf_conntrack.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_expect.h>
43 #include <net/netfilter/nf_conntrack_helper.h>
44 #include <net/netfilter/nf_conntrack_seqadj.h>
45 #include <net/netfilter/nf_conntrack_core.h>
46 #include <net/netfilter/nf_conntrack_extend.h>
47 #include <net/netfilter/nf_conntrack_acct.h>
48 #include <net/netfilter/nf_conntrack_ecache.h>
49 #include <net/netfilter/nf_conntrack_zones.h>
50 #include <net/netfilter/nf_conntrack_timestamp.h>
51 #include <net/netfilter/nf_conntrack_timeout.h>
52 #include <net/netfilter/nf_conntrack_labels.h>
53 #include <net/netfilter/nf_conntrack_synproxy.h>
54 #include <net/netfilter/nf_nat.h>
55 #include <net/netfilter/nf_nat_core.h>
56 #include <net/netfilter/nf_nat_helper.h>
57 #include <net/netns/hash.h>
58
59 #define NF_CONNTRACK_VERSION    "0.5.0"
60
61 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
62                                       enum nf_nat_manip_type manip,
63                                       const struct nlattr *attr) __read_mostly;
64 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
65
66 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
67 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
68
69 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
70 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
71
72 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
73 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
74
75 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
76 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
77 static __read_mostly seqcount_t nf_conntrack_generation;
78 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
79 static __read_mostly bool nf_conntrack_locks_all;
80
81 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
82 {
83         spin_lock(lock);
84         while (unlikely(nf_conntrack_locks_all)) {
85                 spin_unlock(lock);
86                 spin_unlock_wait(&nf_conntrack_locks_all_lock);
87                 spin_lock(lock);
88         }
89 }
90 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
91
92 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
93 {
94         h1 %= CONNTRACK_LOCKS;
95         h2 %= CONNTRACK_LOCKS;
96         spin_unlock(&nf_conntrack_locks[h1]);
97         if (h1 != h2)
98                 spin_unlock(&nf_conntrack_locks[h2]);
99 }
100
101 /* return true if we need to recompute hashes (in case hash table was resized) */
102 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
103                                      unsigned int h2, unsigned int sequence)
104 {
105         h1 %= CONNTRACK_LOCKS;
106         h2 %= CONNTRACK_LOCKS;
107         if (h1 <= h2) {
108                 nf_conntrack_lock(&nf_conntrack_locks[h1]);
109                 if (h1 != h2)
110                         spin_lock_nested(&nf_conntrack_locks[h2],
111                                          SINGLE_DEPTH_NESTING);
112         } else {
113                 nf_conntrack_lock(&nf_conntrack_locks[h2]);
114                 spin_lock_nested(&nf_conntrack_locks[h1],
115                                  SINGLE_DEPTH_NESTING);
116         }
117         if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
118                 nf_conntrack_double_unlock(h1, h2);
119                 return true;
120         }
121         return false;
122 }
123
124 static void nf_conntrack_all_lock(void)
125 {
126         int i;
127
128         spin_lock(&nf_conntrack_locks_all_lock);
129         nf_conntrack_locks_all = true;
130
131         for (i = 0; i < CONNTRACK_LOCKS; i++) {
132                 spin_unlock_wait(&nf_conntrack_locks[i]);
133         }
134 }
135
136 static void nf_conntrack_all_unlock(void)
137 {
138         nf_conntrack_locks_all = false;
139         spin_unlock(&nf_conntrack_locks_all_lock);
140 }
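/* Editor's example (sketch, not part of the original file): a writer that
 * must exclude every per-bucket lock -- e.g. code publishing a new hash
 * table -- pairs nf_conntrack_all_lock()/nf_conntrack_all_unlock() with a
 * write section on nf_conntrack_generation, so readers spinning in
 * nf_conntrack_lock() or retrying on the seqcount observe a consistent
 * state.  example_global_update() is a hypothetical name.
 */
#if 0
static void example_global_update(void)
{
        local_bh_disable();
        nf_conntrack_all_lock();
        write_seqcount_begin(&nf_conntrack_generation);

        /* ... publish a new nf_conntrack_hash / nf_conntrack_htable_size ... */

        write_seqcount_end(&nf_conntrack_generation);
        nf_conntrack_all_unlock();
        local_bh_enable();
}
#endif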
141
142 unsigned int nf_conntrack_htable_size __read_mostly;
143 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
144
145 unsigned int nf_conntrack_max __read_mostly;
146 EXPORT_SYMBOL_GPL(nf_conntrack_max);
147
148 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
149 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
150
151 static unsigned int nf_conntrack_hash_rnd __read_mostly;
152
153 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
154                               const struct net *net)
155 {
156         unsigned int n;
157         u32 seed;
158
159         get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
160
161         /* The direction must be ignored, so we hash everything up to the
162          * destination ports (which is a multiple of 4) and treat the last
163          * three bytes manually.
164          */
165         seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
166         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
167         return jhash2((u32 *)tuple, n, seed ^
168                       (((__force __u16)tuple->dst.u.all << 16) |
169                       tuple->dst.protonum));
170 }
171
172 static u32 scale_hash(u32 hash)
173 {
174         return reciprocal_scale(hash, nf_conntrack_htable_size);
175 }
176
177 static u32 __hash_conntrack(const struct net *net,
178                             const struct nf_conntrack_tuple *tuple,
179                             unsigned int size)
180 {
181         return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
182 }
183
184 static u32 hash_conntrack(const struct net *net,
185                           const struct nf_conntrack_tuple *tuple)
186 {
187         return scale_hash(hash_conntrack_raw(tuple, net));
188 }
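/* Editor's note (sketch): reciprocal_scale() from <linux/kernel.h> maps the
 * full 32-bit hash onto [0, size) with a multiply-and-shift instead of a
 * modulo.  The hypothetical helper below open-codes the same computation
 * for illustration; it is not part of the original file.
 */
#if 0
static u32 example_bucket_of(u32 hash, u32 size)
{
        /* equivalent to reciprocal_scale(hash, size) */
        return (u32)(((u64)hash * size) >> 32);
}
#endif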
189
190 bool
191 nf_ct_get_tuple(const struct sk_buff *skb,
192                 unsigned int nhoff,
193                 unsigned int dataoff,
194                 u_int16_t l3num,
195                 u_int8_t protonum,
196                 struct net *net,
197                 struct nf_conntrack_tuple *tuple,
198                 const struct nf_conntrack_l3proto *l3proto,
199                 const struct nf_conntrack_l4proto *l4proto)
200 {
201         memset(tuple, 0, sizeof(*tuple));
202
203         tuple->src.l3num = l3num;
204         if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
205                 return false;
206
207         tuple->dst.protonum = protonum;
208         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
209
210         return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
211 }
212 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
213
214 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
215                        u_int16_t l3num,
216                        struct net *net, struct nf_conntrack_tuple *tuple)
217 {
218         struct nf_conntrack_l3proto *l3proto;
219         struct nf_conntrack_l4proto *l4proto;
220         unsigned int protoff;
221         u_int8_t protonum;
222         int ret;
223
224         rcu_read_lock();
225
226         l3proto = __nf_ct_l3proto_find(l3num);
227         ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
228         if (ret != NF_ACCEPT) {
229                 rcu_read_unlock();
230                 return false;
231         }
232
233         l4proto = __nf_ct_l4proto_find(l3num, protonum);
234
235         ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
236                               l3proto, l4proto);
237
238         rcu_read_unlock();
239         return ret;
240 }
241 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
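/* Editor's example (sketch, not part of the original file): a caller that
 * only has an skb can use nf_ct_get_tuplepr() to build the tuple, letting
 * the registered l3/l4 protocol modules parse the headers.  NFPROTO_IPV4
 * and example_extract_ipv4_tuple() are used purely for illustration.
 */
#if 0
static bool example_extract_ipv4_tuple(struct net *net, struct sk_buff *skb,
                                        struct nf_conntrack_tuple *tuple)
{
        return nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                                 NFPROTO_IPV4, net, tuple);
}
#endif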
242
243 bool
244 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
245                    const struct nf_conntrack_tuple *orig,
246                    const struct nf_conntrack_l3proto *l3proto,
247                    const struct nf_conntrack_l4proto *l4proto)
248 {
249         memset(inverse, 0, sizeof(*inverse));
250
251         inverse->src.l3num = orig->src.l3num;
252         if (l3proto->invert_tuple(inverse, orig) == 0)
253                 return false;
254
255         inverse->dst.dir = !orig->dst.dir;
256
257         inverse->dst.protonum = orig->dst.protonum;
258         return l4proto->invert_tuple(inverse, orig);
259 }
260 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
261
262 static void
263 clean_from_lists(struct nf_conn *ct)
264 {
265         pr_debug("clean_from_lists(%p)\n", ct);
266         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
267         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
268
269         /* Destroy all pending expectations */
270         nf_ct_remove_expectations(ct);
271 }
272
273 /* must be called with local_bh_disable */
274 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
275 {
276         struct ct_pcpu *pcpu;
277
278         /* add this conntrack to the (per cpu) dying list */
279         ct->cpu = smp_processor_id();
280         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
281
282         spin_lock(&pcpu->lock);
283         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
284                              &pcpu->dying);
285         spin_unlock(&pcpu->lock);
286 }
287
288 /* must be called with local_bh_disable */
289 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
290 {
291         struct ct_pcpu *pcpu;
292
293         /* add this conntrack to the (per cpu) unconfirmed list */
294         ct->cpu = smp_processor_id();
295         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
296
297         spin_lock(&pcpu->lock);
298         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
299                              &pcpu->unconfirmed);
300         spin_unlock(&pcpu->lock);
301 }
302
303 /* must be called with local_bh_disable */
304 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
305 {
306         struct ct_pcpu *pcpu;
307
308         /* We overload the first tuple to link into the unconfirmed or dying list. */
309         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
310
311         spin_lock(&pcpu->lock);
312         BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
313         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
314         spin_unlock(&pcpu->lock);
315 }
316
317 /* Released via destroy_conntrack() */
318 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
319                                  const struct nf_conntrack_zone *zone,
320                                  gfp_t flags)
321 {
322         struct nf_conn *tmpl;
323
324         tmpl = kzalloc(sizeof(*tmpl), flags);
325         if (tmpl == NULL)
326                 return NULL;
327
328         tmpl->status = IPS_TEMPLATE;
329         write_pnet(&tmpl->ct_net, net);
330
331         if (nf_ct_zone_add(tmpl, flags, zone) < 0)
332                 goto out_free;
333
334         atomic_set(&tmpl->ct_general.use, 0);
335
336         return tmpl;
337 out_free:
338         kfree(tmpl);
339         return NULL;
340 }
341 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
342
343 void nf_ct_tmpl_free(struct nf_conn *tmpl)
344 {
345         nf_ct_ext_destroy(tmpl);
346         nf_ct_ext_free(tmpl);
347         kfree(tmpl);
348 }
349 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
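/* Editor's example (sketch): roughly how a rule target such as xt_CT uses
 * the template API above -- allocate a template conntrack for a zone and
 * take the caller's reference before attaching it to packets.  Error
 * handling is trimmed and example_make_template() is a hypothetical name.
 */
#if 0
static struct nf_conn *example_make_template(struct net *net,
                                             const struct nf_conntrack_zone *zone)
{
        struct nf_conn *tmpl;

        tmpl = nf_ct_tmpl_alloc(net, zone, GFP_KERNEL);
        if (!tmpl)
                return NULL;

        /* templates are allocated with use == 0; take one reference */
        nf_conntrack_get(&tmpl->ct_general);
        return tmpl;
}
#endif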
350
351 static void
352 destroy_conntrack(struct nf_conntrack *nfct)
353 {
354         struct nf_conn *ct = (struct nf_conn *)nfct;
355         struct net *net = nf_ct_net(ct);
356         struct nf_conntrack_l4proto *l4proto;
357
358         pr_debug("destroy_conntrack(%p)\n", ct);
359         NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
360         NF_CT_ASSERT(!timer_pending(&ct->timeout));
361
362         if (unlikely(nf_ct_is_template(ct))) {
363                 nf_ct_tmpl_free(ct);
364                 return;
365         }
366         rcu_read_lock();
367         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
368         if (l4proto->destroy)
369                 l4proto->destroy(ct);
370
371         rcu_read_unlock();
372
373         local_bh_disable();
374         /* Expectations will have been removed in clean_from_lists,
375          * except TFTP can create an expectation on the first packet,
376          * before connection is in the list, so we need to clean here,
377          * too.
378          */
379         nf_ct_remove_expectations(ct);
380
381         nf_ct_del_from_dying_or_unconfirmed_list(ct);
382
383         NF_CT_STAT_INC(net, delete);
384         local_bh_enable();
385
386         if (ct->master)
387                 nf_ct_put(ct->master);
388
389         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
390         nf_conntrack_free(ct);
391 }
392
393 static void nf_ct_delete_from_lists(struct nf_conn *ct)
394 {
395         struct net *net = nf_ct_net(ct);
396         unsigned int hash, reply_hash;
397         unsigned int sequence;
398
399         nf_ct_helper_destroy(ct);
400
401         local_bh_disable();
402         do {
403                 sequence = read_seqcount_begin(&nf_conntrack_generation);
404                 hash = hash_conntrack(net,
405                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
406                 reply_hash = hash_conntrack(net,
407                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
408         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
409
410         clean_from_lists(ct);
411         nf_conntrack_double_unlock(hash, reply_hash);
412
413         nf_ct_add_to_dying_list(ct);
414
415         NF_CT_STAT_INC(net, delete_list);
416         local_bh_enable();
417 }
418
419 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
420 {
421         struct nf_conn_tstamp *tstamp;
422
423         tstamp = nf_conn_tstamp_find(ct);
424         if (tstamp && tstamp->stop == 0)
425                 tstamp->stop = ktime_get_real_ns();
426
427         if (nf_ct_is_dying(ct))
428                 goto delete;
429
430         if (nf_conntrack_event_report(IPCT_DESTROY, ct,
431                                     portid, report) < 0) {
432                 /* destroy event was not delivered */
433                 nf_ct_delete_from_lists(ct);
434                 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
435                 return false;
436         }
437
438         nf_conntrack_ecache_work(nf_ct_net(ct));
439         set_bit(IPS_DYING_BIT, &ct->status);
440  delete:
441         nf_ct_delete_from_lists(ct);
442         nf_ct_put(ct);
443         return true;
444 }
445 EXPORT_SYMBOL_GPL(nf_ct_delete);
446
447 static void death_by_timeout(unsigned long ul_conntrack)
448 {
449         nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
450 }
451
452 static inline bool
453 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
454                 const struct nf_conntrack_tuple *tuple,
455                 const struct nf_conntrack_zone *zone,
456                 const struct net *net)
457 {
458         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
459
460         /* A conntrack can be recreated with an equal tuple,
461          * so we need to check that the conntrack is confirmed
462          */
463         return nf_ct_tuple_equal(tuple, &h->tuple) &&
464                nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
465                nf_ct_is_confirmed(ct) &&
466                net_eq(net, nf_ct_net(ct));
467 }
468
469 /*
470  * Warning:
471  * - Caller must take a reference on returned object
472  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
473  */
474 static struct nf_conntrack_tuple_hash *
475 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
476                       const struct nf_conntrack_tuple *tuple, u32 hash)
477 {
478         struct nf_conntrack_tuple_hash *h;
479         struct hlist_nulls_head *ct_hash;
480         struct hlist_nulls_node *n;
481         unsigned int bucket, sequence;
482
483 begin:
484         do {
485                 sequence = read_seqcount_begin(&nf_conntrack_generation);
486                 bucket = scale_hash(hash);
487                 ct_hash = nf_conntrack_hash;
488         } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
489
490         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
491                 if (nf_ct_key_equal(h, tuple, zone, net)) {
492                         NF_CT_STAT_INC_ATOMIC(net, found);
493                         return h;
494                 }
495                 NF_CT_STAT_INC_ATOMIC(net, searched);
496         }
497         /*
498          * if the nulls value we got at the end of this lookup is
499          * not the expected one, we must restart lookup.
500          * We probably met an item that was moved to another chain.
501          */
502         if (get_nulls_value(n) != bucket) {
503                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
504                 goto begin;
505         }
506
507         return NULL;
508 }
509
510 /* Find a connection corresponding to a tuple. */
511 static struct nf_conntrack_tuple_hash *
512 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
513                         const struct nf_conntrack_tuple *tuple, u32 hash)
514 {
515         struct nf_conntrack_tuple_hash *h;
516         struct nf_conn *ct;
517
518         rcu_read_lock();
519 begin:
520         h = ____nf_conntrack_find(net, zone, tuple, hash);
521         if (h) {
522                 ct = nf_ct_tuplehash_to_ctrack(h);
523                 if (unlikely(nf_ct_is_dying(ct) ||
524                              !atomic_inc_not_zero(&ct->ct_general.use)))
525                         h = NULL;
526                 else {
527                         if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
528                                 nf_ct_put(ct);
529                                 goto begin;
530                         }
531                 }
532         }
533         rcu_read_unlock();
534
535         return h;
536 }
537
538 struct nf_conntrack_tuple_hash *
539 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
540                       const struct nf_conntrack_tuple *tuple)
541 {
542         return __nf_conntrack_find_get(net, zone, tuple,
543                                        hash_conntrack_raw(tuple, net));
544 }
545 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
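/* Editor's example (sketch, not part of the original file): a read-side
 * caller of nf_conntrack_find_get().  A successful lookup returns an entry
 * that already holds a reference, which must be released with nf_ct_put().
 * nf_ct_zone_dflt is the default zone; example_tuple_is_tracked() is a
 * hypothetical name.
 */
#if 0
static bool example_tuple_is_tracked(struct net *net,
                                     const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;

        h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, tuple);
        if (!h)
                return false;

        nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
        return true;
}
#endif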
546
547 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
548                                        unsigned int hash,
549                                        unsigned int reply_hash)
550 {
551         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
552                            &nf_conntrack_hash[hash]);
553         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
554                            &nf_conntrack_hash[reply_hash]);
555 }
556
557 int
558 nf_conntrack_hash_check_insert(struct nf_conn *ct)
559 {
560         const struct nf_conntrack_zone *zone;
561         struct net *net = nf_ct_net(ct);
562         unsigned int hash, reply_hash;
563         struct nf_conntrack_tuple_hash *h;
564         struct hlist_nulls_node *n;
565         unsigned int sequence;
566
567         zone = nf_ct_zone(ct);
568
569         local_bh_disable();
570         do {
571                 sequence = read_seqcount_begin(&nf_conntrack_generation);
572                 hash = hash_conntrack(net,
573                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
574                 reply_hash = hash_conntrack(net,
575                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
576         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
577
578         /* See if there's one in the list already, including reverse */
579         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
580                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
581                                     zone, net))
582                         goto out;
583
584         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
585                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
586                                     zone, net))
587                         goto out;
588
589         add_timer(&ct->timeout);
590         smp_wmb();
591         /* The caller holds a reference to this object */
592         atomic_set(&ct->ct_general.use, 2);
593         __nf_conntrack_hash_insert(ct, hash, reply_hash);
594         nf_conntrack_double_unlock(hash, reply_hash);
595         NF_CT_STAT_INC(net, insert);
596         local_bh_enable();
597         return 0;
598
599 out:
600         nf_conntrack_double_unlock(hash, reply_hash);
601         NF_CT_STAT_INC(net, insert_failed);
602         local_bh_enable();
603         return -EEXIST;
604 }
605 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
606
607 static inline void nf_ct_acct_update(struct nf_conn *ct,
608                                      enum ip_conntrack_info ctinfo,
609                                      unsigned int len)
610 {
611         struct nf_conn_acct *acct;
612
613         acct = nf_conn_acct_find(ct);
614         if (acct) {
615                 struct nf_conn_counter *counter = acct->counter;
616
617                 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
618                 atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
619         }
620 }
621
622 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
623                              const struct nf_conn *loser_ct)
624 {
625         struct nf_conn_acct *acct;
626
627         acct = nf_conn_acct_find(loser_ct);
628         if (acct) {
629                 struct nf_conn_counter *counter = acct->counter;
630                 unsigned int bytes;
631
632                 /* u32 should be fine since we must have seen one packet. */
633                 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
634                 nf_ct_acct_update(ct, ctinfo, bytes);
635         }
636 }
637
638 /* Resolve race on insertion if this protocol allows this. */
639 static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
640                                enum ip_conntrack_info ctinfo,
641                                struct nf_conntrack_tuple_hash *h)
642 {
643         /* This is the conntrack entry already in hashes that won race. */
644         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
645         struct nf_conntrack_l4proto *l4proto;
646
647         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
648         if (l4proto->allow_clash &&
649             !nf_ct_is_dying(ct) &&
650             atomic_inc_not_zero(&ct->ct_general.use)) {
651                 nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
652                 nf_conntrack_put(skb->nfct);
653                 /* Assign conntrack already in hashes to this skbuff. Don't
654                  * modify skb->nfctinfo to ensure consistent stateful filtering.
655                  */
656                 skb->nfct = &ct->ct_general;
657                 return NF_ACCEPT;
658         }
659         NF_CT_STAT_INC(net, drop);
660         return NF_DROP;
661 }
662
663 /* Confirm a connection given skb; places it in hash table */
664 int
665 __nf_conntrack_confirm(struct sk_buff *skb)
666 {
667         const struct nf_conntrack_zone *zone;
668         unsigned int hash, reply_hash;
669         struct nf_conntrack_tuple_hash *h;
670         struct nf_conn *ct;
671         struct nf_conn_help *help;
672         struct nf_conn_tstamp *tstamp;
673         struct hlist_nulls_node *n;
674         enum ip_conntrack_info ctinfo;
675         struct net *net;
676         unsigned int sequence;
677         int ret = NF_DROP;
678
679         ct = nf_ct_get(skb, &ctinfo);
680         net = nf_ct_net(ct);
681
682         /* ipt_REJECT uses nf_conntrack_attach to attach related
683            ICMP/TCP RST packets in the other direction.  The actual packet
684            which created the connection will be IP_CT_NEW or, for an
685            expected connection, IP_CT_RELATED. */
686         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
687                 return NF_ACCEPT;
688
689         zone = nf_ct_zone(ct);
690         local_bh_disable();
691
692         do {
693                 sequence = read_seqcount_begin(&nf_conntrack_generation);
694                 /* reuse the hash saved before */
695                 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
696                 hash = scale_hash(hash);
697                 reply_hash = hash_conntrack(net,
698                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
699
700         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
701
702         /* We're not in hash table, and we refuse to set up related
703          * connections for unconfirmed conns.  But packet copies and
704          * REJECT will give spurious warnings here.
705          */
706         /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
707
708         /* No external references means no one else could have
709          * confirmed us.
710          */
711         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
712         pr_debug("Confirming conntrack %p\n", ct);
713         /* We have to check the DYING flag after unlink to prevent
714          * a race against nf_ct_get_next_corpse() possibly called from
715          * user context, else we insert an already 'dead' hash, blocking
716          * further use of that particular connection -JM.
717          */
718         nf_ct_del_from_dying_or_unconfirmed_list(ct);
719
720         if (unlikely(nf_ct_is_dying(ct))) {
721                 nf_ct_add_to_dying_list(ct);
722                 goto dying;
723         }
724
725         /* See if there's one in the list already, including reverse:
726            NAT could have grabbed it without realizing, since we're
727            not in the hash.  If there is, we lost race. */
728         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
729                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
730                                     zone, net))
731                         goto out;
732
733         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
734                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
735                                     zone, net))
736                         goto out;
737
738         /* Timer relative to confirmation time, not original
739            setting time, otherwise we'd get timer wrap in
740            weird delay cases. */
741         ct->timeout.expires += jiffies;
742         add_timer(&ct->timeout);
743         atomic_inc(&ct->ct_general.use);
744         ct->status |= IPS_CONFIRMED;
745
746         /* set conntrack timestamp, if enabled. */
747         tstamp = nf_conn_tstamp_find(ct);
748         if (tstamp) {
749                 if (skb->tstamp.tv64 == 0)
750                         __net_timestamp(skb);
751
752                 tstamp->start = ktime_to_ns(skb->tstamp);
753         }
754         /* Since the lookup is lockless, hash insertion must be done after
755          * starting the timer and setting the CONFIRMED bit. The RCU barriers
756          * guarantee that no other CPU can find the conntrack before the above
757          * stores are visible.
758          */
759         __nf_conntrack_hash_insert(ct, hash, reply_hash);
760         nf_conntrack_double_unlock(hash, reply_hash);
761         NF_CT_STAT_INC(net, insert);
762         local_bh_enable();
763
764         help = nfct_help(ct);
765         if (help && help->helper)
766                 nf_conntrack_event_cache(IPCT_HELPER, ct);
767
768         nf_conntrack_event_cache(master_ct(ct) ?
769                                  IPCT_RELATED : IPCT_NEW, ct);
770         return NF_ACCEPT;
771
772 out:
773         nf_ct_add_to_dying_list(ct);
774         ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
775 dying:
776         nf_conntrack_double_unlock(hash, reply_hash);
777         NF_CT_STAT_INC(net, insert_failed);
778         local_bh_enable();
779         return ret;
780 }
781 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
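/* Editor's note (sketch): __nf_conntrack_confirm() is normally reached via
 * the nf_conntrack_confirm() inline wrapper in nf_conntrack_core.h, which
 * looks roughly like the fragment below -- confirm once, then deliver any
 * cached events.  Reproduced from memory for context; see the header for
 * the authoritative version.
 */
#if 0
static inline int nf_conntrack_confirm(struct sk_buff *skb)
{
        struct nf_conn *ct = (struct nf_conn *)skb->nfct;
        int ret = NF_ACCEPT;

        if (ct && !nf_ct_is_untracked(ct)) {
                if (!nf_ct_is_confirmed(ct))
                        ret = __nf_conntrack_confirm(skb);
                if (likely(ret == NF_ACCEPT))
                        nf_ct_deliver_cached_events(ct);
        }
        return ret;
}
#endif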
782
783 /* Returns true if a connection corresponds to the tuple (required
784    for NAT). */
785 int
786 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
787                          const struct nf_conn *ignored_conntrack)
788 {
789         struct net *net = nf_ct_net(ignored_conntrack);
790         const struct nf_conntrack_zone *zone;
791         struct nf_conntrack_tuple_hash *h;
792         struct hlist_nulls_head *ct_hash;
793         unsigned int hash, sequence;
794         struct hlist_nulls_node *n;
795         struct nf_conn *ct;
796
797         zone = nf_ct_zone(ignored_conntrack);
798
799         rcu_read_lock();
800         do {
801                 sequence = read_seqcount_begin(&nf_conntrack_generation);
802                 hash = hash_conntrack(net, tuple);
803                 ct_hash = nf_conntrack_hash;
804         } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
805
806         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
807                 ct = nf_ct_tuplehash_to_ctrack(h);
808                 if (ct != ignored_conntrack &&
809                     nf_ct_key_equal(h, tuple, zone, net)) {
810                         NF_CT_STAT_INC_ATOMIC(net, found);
811                         rcu_read_unlock();
812                         return 1;
813                 }
814                 NF_CT_STAT_INC_ATOMIC(net, searched);
815         }
816         rcu_read_unlock();
817
818         return 0;
819 }
820 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
821
822 #define NF_CT_EVICTION_RANGE    8
823
824 /* There's a small race here where we may free a just-assured
825    connection.  Too bad: we're in trouble anyway. */
826 static noinline int early_drop(struct net *net, unsigned int _hash)
827 {
828         /* Use oldest entry, which is roughly LRU */
829         struct nf_conntrack_tuple_hash *h;
830         struct nf_conn *tmp;
831         struct hlist_nulls_node *n;
832         unsigned int i, hash, sequence;
833         struct nf_conn *ct = NULL;
834         spinlock_t *lockp;
835         bool ret = false;
836
837         i = 0;
838
839         local_bh_disable();
840 restart:
841         sequence = read_seqcount_begin(&nf_conntrack_generation);
842         for (; i < NF_CT_EVICTION_RANGE; i++) {
843                 hash = scale_hash(_hash++);
844                 lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
845                 nf_conntrack_lock(lockp);
846                 if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
847                         spin_unlock(lockp);
848                         goto restart;
849                 }
850                 hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
851                                                hnnode) {
852                         tmp = nf_ct_tuplehash_to_ctrack(h);
853
854                         if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
855                             !net_eq(nf_ct_net(tmp), net) ||
856                             nf_ct_is_dying(tmp))
857                                 continue;
858
859                         if (atomic_inc_not_zero(&tmp->ct_general.use)) {
860                                 ct = tmp;
861                                 break;
862                         }
863                 }
864
865                 spin_unlock(lockp);
866                 if (ct)
867                         break;
868         }
869
870         local_bh_enable();
871
872         if (!ct)
873                 return false;
874
875         /* kill only if in same netns -- might have moved due to
876          * SLAB_DESTROY_BY_RCU rules
877          */
878         if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
879                 if (nf_ct_delete(ct, 0, 0)) {
880                         NF_CT_STAT_INC_ATOMIC(net, early_drop);
881                         ret = true;
882                 }
883         }
884
885         nf_ct_put(ct);
886         return ret;
887 }
888
889 static struct nf_conn *
890 __nf_conntrack_alloc(struct net *net,
891                      const struct nf_conntrack_zone *zone,
892                      const struct nf_conntrack_tuple *orig,
893                      const struct nf_conntrack_tuple *repl,
894                      gfp_t gfp, u32 hash)
895 {
896         struct nf_conn *ct;
897
898         /* We don't want any race condition at early drop stage */
899         atomic_inc(&net->ct.count);
900
901         if (nf_conntrack_max &&
902             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
903                 if (!early_drop(net, hash)) {
904                         atomic_dec(&net->ct.count);
905                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
906                         return ERR_PTR(-ENOMEM);
907                 }
908         }
909
910         /*
911          * Do not use kmem_cache_zalloc(), as this cache uses
912          * SLAB_DESTROY_BY_RCU.
913          */
914         ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
915         if (ct == NULL)
916                 goto out;
917
918         spin_lock_init(&ct->lock);
919         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
920         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
921         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
922         /* save hash for reusing when confirming */
923         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
924         ct->status = 0;
925         /* Don't set timer yet: wait for confirmation */
926         setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
927         write_pnet(&ct->ct_net, net);
928         memset(&ct->__nfct_init_offset[0], 0,
929                offsetof(struct nf_conn, proto) -
930                offsetof(struct nf_conn, __nfct_init_offset[0]));
931
932         if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
933                 goto out_free;
934
935         /* Because we use RCU lookups, we set ct_general.use to zero before
936          * this is inserted in any list.
937          */
938         atomic_set(&ct->ct_general.use, 0);
939         return ct;
940 out_free:
941         kmem_cache_free(nf_conntrack_cachep, ct);
942 out:
943         atomic_dec(&net->ct.count);
944         return ERR_PTR(-ENOMEM);
945 }
946
947 struct nf_conn *nf_conntrack_alloc(struct net *net,
948                                    const struct nf_conntrack_zone *zone,
949                                    const struct nf_conntrack_tuple *orig,
950                                    const struct nf_conntrack_tuple *repl,
951                                    gfp_t gfp)
952 {
953         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
954 }
955 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
956
957 void nf_conntrack_free(struct nf_conn *ct)
958 {
959         struct net *net = nf_ct_net(ct);
960
961         /* A freed object has refcnt == 0, that's
962          * the golden rule for SLAB_DESTROY_BY_RCU
963          */
964         NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
965
966         nf_ct_ext_destroy(ct);
967         nf_ct_ext_free(ct);
968         kmem_cache_free(nf_conntrack_cachep, ct);
969         smp_mb__before_atomic();
970         atomic_dec(&net->ct.count);
971 }
972 EXPORT_SYMBOL_GPL(nf_conntrack_free);
973
974
975 /* Allocate a new conntrack: we return -ENOMEM if classification
976    failed due to stress.  Otherwise it really is unclassifiable. */
977 static struct nf_conntrack_tuple_hash *
978 init_conntrack(struct net *net, struct nf_conn *tmpl,
979                const struct nf_conntrack_tuple *tuple,
980                struct nf_conntrack_l3proto *l3proto,
981                struct nf_conntrack_l4proto *l4proto,
982                struct sk_buff *skb,
983                unsigned int dataoff, u32 hash)
984 {
985         struct nf_conn *ct;
986         struct nf_conn_help *help;
987         struct nf_conntrack_tuple repl_tuple;
988         struct nf_conntrack_ecache *ecache;
989         struct nf_conntrack_expect *exp = NULL;
990         const struct nf_conntrack_zone *zone;
991         struct nf_conn_timeout *timeout_ext;
992         struct nf_conntrack_zone tmp;
993         unsigned int *timeouts;
994
995         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
996                 pr_debug("Can't invert tuple.\n");
997                 return NULL;
998         }
999
1000         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1001         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1002                                   hash);
1003         if (IS_ERR(ct))
1004                 return (struct nf_conntrack_tuple_hash *)ct;
1005
1006         if (tmpl && nfct_synproxy(tmpl)) {
1007                 nfct_seqadj_ext_add(ct);
1008                 nfct_synproxy_ext_add(ct);
1009         }
1010
1011         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1012         if (timeout_ext) {
1013                 timeouts = nf_ct_timeout_data(timeout_ext);
1014                 if (unlikely(!timeouts))
1015                         timeouts = l4proto->get_timeouts(net);
1016         } else {
1017                 timeouts = l4proto->get_timeouts(net);
1018         }
1019
1020         if (!l4proto->new(ct, skb, dataoff, timeouts)) {
1021                 nf_conntrack_free(ct);
1022                 pr_debug("can't track with proto module\n");
1023                 return NULL;
1024         }
1025
1026         if (timeout_ext)
1027                 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1028                                       GFP_ATOMIC);
1029
1030         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1031         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1032         nf_ct_labels_ext_add(ct);
1033
1034         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1035         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1036                                  ecache ? ecache->expmask : 0,
1037                              GFP_ATOMIC);
1038
1039         local_bh_disable();
1040         if (net->ct.expect_count) {
1041                 spin_lock(&nf_conntrack_expect_lock);
1042                 exp = nf_ct_find_expectation(net, zone, tuple);
1043                 if (exp) {
1044                         pr_debug("expectation arrives ct=%p exp=%p\n",
1045                                  ct, exp);
1046                         /* Welcome, Mr. Bond.  We've been expecting you... */
1047                         __set_bit(IPS_EXPECTED_BIT, &ct->status);
1048                         /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1049                         ct->master = exp->master;
1050                         if (exp->helper) {
1051                                 help = nf_ct_helper_ext_add(ct, exp->helper,
1052                                                             GFP_ATOMIC);
1053                                 if (help)
1054                                         rcu_assign_pointer(help->helper, exp->helper);
1055                         }
1056
1057 #ifdef CONFIG_NF_CONNTRACK_MARK
1058                         ct->mark = exp->master->mark;
1059 #endif
1060 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1061                         ct->secmark = exp->master->secmark;
1062 #endif
1063                         NF_CT_STAT_INC(net, expect_new);
1064                 }
1065                 spin_unlock(&nf_conntrack_expect_lock);
1066         }
1067         if (!exp) {
1068                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1069                 NF_CT_STAT_INC(net, new);
1070         }
1071
1072         /* Now it is inserted into the unconfirmed list, bump refcount */
1073         nf_conntrack_get(&ct->ct_general);
1074         nf_ct_add_to_unconfirmed_list(ct);
1075
1076         local_bh_enable();
1077
1078         if (exp) {
1079                 if (exp->expectfn)
1080                         exp->expectfn(ct, exp);
1081                 nf_ct_expect_put(exp);
1082         }
1083
1084         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1085 }
1086
1087 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1088 static inline struct nf_conn *
1089 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1090                   struct sk_buff *skb,
1091                   unsigned int dataoff,
1092                   u_int16_t l3num,
1093                   u_int8_t protonum,
1094                   struct nf_conntrack_l3proto *l3proto,
1095                   struct nf_conntrack_l4proto *l4proto,
1096                   int *set_reply,
1097                   enum ip_conntrack_info *ctinfo)
1098 {
1099         const struct nf_conntrack_zone *zone;
1100         struct nf_conntrack_tuple tuple;
1101         struct nf_conntrack_tuple_hash *h;
1102         struct nf_conntrack_zone tmp;
1103         struct nf_conn *ct;
1104         u32 hash;
1105
1106         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1107                              dataoff, l3num, protonum, net, &tuple, l3proto,
1108                              l4proto)) {
1109                 pr_debug("Can't get tuple\n");
1110                 return NULL;
1111         }
1112
1113         /* look for tuple match */
1114         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1115         hash = hash_conntrack_raw(&tuple, net);
1116         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1117         if (!h) {
1118                 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1119                                    skb, dataoff, hash);
1120                 if (!h)
1121                         return NULL;
1122                 if (IS_ERR(h))
1123                         return (void *)h;
1124         }
1125         ct = nf_ct_tuplehash_to_ctrack(h);
1126
1127         /* It exists; we have (non-exclusive) reference. */
1128         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1129                 *ctinfo = IP_CT_ESTABLISHED_REPLY;
1130                 /* Please set reply bit if this packet OK */
1131                 *set_reply = 1;
1132         } else {
1133                 /* Once we've had two way comms, always ESTABLISHED. */
1134                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1135                         pr_debug("normal packet for %p\n", ct);
1136                         *ctinfo = IP_CT_ESTABLISHED;
1137                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1138                         pr_debug("related packet for %p\n", ct);
1139                         *ctinfo = IP_CT_RELATED;
1140                 } else {
1141                         pr_debug("new packet for %p\n", ct);
1142                         *ctinfo = IP_CT_NEW;
1143                 }
1144                 *set_reply = 0;
1145         }
1146         skb->nfct = &ct->ct_general;
1147         skb->nfctinfo = *ctinfo;
1148         return ct;
1149 }
1150
1151 unsigned int
1152 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1153                 struct sk_buff *skb)
1154 {
1155         struct nf_conn *ct, *tmpl = NULL;
1156         enum ip_conntrack_info ctinfo;
1157         struct nf_conntrack_l3proto *l3proto;
1158         struct nf_conntrack_l4proto *l4proto;
1159         unsigned int *timeouts;
1160         unsigned int dataoff;
1161         u_int8_t protonum;
1162         int set_reply = 0;
1163         int ret;
1164
1165         if (skb->nfct) {
1166                 /* Previously seen (loopback or untracked)?  Ignore. */
1167                 tmpl = (struct nf_conn *)skb->nfct;
1168                 if (!nf_ct_is_template(tmpl)) {
1169                         NF_CT_STAT_INC_ATOMIC(net, ignore);
1170                         return NF_ACCEPT;
1171                 }
1172                 skb->nfct = NULL;
1173         }
1174
1175         /* rcu_read_lock()ed by nf_hook_slow */
1176         l3proto = __nf_ct_l3proto_find(pf);
1177         ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
1178                                    &dataoff, &protonum);
1179         if (ret <= 0) {
1180                 pr_debug("not prepared to track yet or error occurred\n");
1181                 NF_CT_STAT_INC_ATOMIC(net, error);
1182                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1183                 ret = -ret;
1184                 goto out;
1185         }
1186
1187         l4proto = __nf_ct_l4proto_find(pf, protonum);
1188
1189         /* It may be a special packet, error, unclean...
1190          * inverse of the return code tells the netfilter
1191          * core what to do with the packet. */
1192         if (l4proto->error != NULL) {
1193                 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
1194                                      pf, hooknum);
1195                 if (ret <= 0) {
1196                         NF_CT_STAT_INC_ATOMIC(net, error);
1197                         NF_CT_STAT_INC_ATOMIC(net, invalid);
1198                         ret = -ret;
1199                         goto out;
1200                 }
1201                 /* ICMP[v6] protocol trackers may assign one conntrack. */
1202                 if (skb->nfct)
1203                         goto out;
1204         }
1205
1206         ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1207                                l3proto, l4proto, &set_reply, &ctinfo);
1208         if (!ct) {
1209                 /* Not valid part of a connection */
1210                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1211                 ret = NF_ACCEPT;
1212                 goto out;
1213         }
1214
1215         if (IS_ERR(ct)) {
1216                 /* Too stressed to deal. */
1217                 NF_CT_STAT_INC_ATOMIC(net, drop);
1218                 ret = NF_DROP;
1219                 goto out;
1220         }
1221
1222         NF_CT_ASSERT(skb->nfct);
1223
1224         /* Decide what timeout policy we want to apply to this flow. */
1225         timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1226
1227         ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1228         if (ret <= 0) {
1229                 /* Invalid: inverse of the return code tells
1230                  * the netfilter core what to do */
1231                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1232                 nf_conntrack_put(skb->nfct);
1233                 skb->nfct = NULL;
1234                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1235                 if (ret == -NF_DROP)
1236                         NF_CT_STAT_INC_ATOMIC(net, drop);
1237                 ret = -ret;
1238                 goto out;
1239         }
1240
1241         if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1242                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1243 out:
1244         if (tmpl) {
1245                 /* Special case: we have to repeat this hook, assign the
1246                  * template again to this packet. We assume that this packet
1247                  * has no conntrack assigned. This is used by nf_ct_tcp. */
1248                 if (ret == NF_REPEAT)
1249                         skb->nfct = (struct nf_conntrack *)tmpl;
1250                 else
1251                         nf_ct_put(tmpl);
1252         }
1253
1254         return ret;
1255 }
1256 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1257
1258 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1259                           const struct nf_conntrack_tuple *orig)
1260 {
1261         bool ret;
1262
1263         rcu_read_lock();
1264         ret = nf_ct_invert_tuple(inverse, orig,
1265                                  __nf_ct_l3proto_find(orig->src.l3num),
1266                                  __nf_ct_l4proto_find(orig->src.l3num,
1267                                                       orig->dst.protonum));
1268         rcu_read_unlock();
1269         return ret;
1270 }
1271 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1272
1273 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1274    implicitly racy: see __nf_conntrack_confirm */
1275 void nf_conntrack_alter_reply(struct nf_conn *ct,
1276                               const struct nf_conntrack_tuple *newreply)
1277 {
1278         struct nf_conn_help *help = nfct_help(ct);
1279
1280         /* Should be unconfirmed, so not in hash table yet */
1281         NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1282
1283         pr_debug("Altering reply tuple of %p to ", ct);
1284         nf_ct_dump_tuple(newreply);
1285
1286         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1287         if (ct->master || (help && !hlist_empty(&help->expectations)))
1288                 return;
1289
1290         rcu_read_lock();
1291         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1292         rcu_read_unlock();
1293 }
1294 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1295
1296 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1297 void __nf_ct_refresh_acct(struct nf_conn *ct,
1298                           enum ip_conntrack_info ctinfo,
1299                           const struct sk_buff *skb,
1300                           unsigned long extra_jiffies,
1301                           int do_acct)
1302 {
1303         NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1304         NF_CT_ASSERT(skb);
1305
1306         /* Only update if this is not a fixed timeout */
1307         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1308                 goto acct;
1309
1310         /* If not in hash table, timer will not be active yet */
1311         if (!nf_ct_is_confirmed(ct)) {
1312                 ct->timeout.expires = extra_jiffies;
1313         } else {
1314                 unsigned long newtime = jiffies + extra_jiffies;
1315
1316                 /* Only update the timeout if the new timeout is at least
1317                    HZ jiffies from the old timeout. Need del_timer for race
1318                    avoidance (may already be dying). */
1319                 if (newtime - ct->timeout.expires >= HZ)
1320                         mod_timer_pending(&ct->timeout, newtime);
1321         }
1322
1323 acct:
1324         if (do_acct)
1325                 nf_ct_acct_update(ct, ctinfo, skb->len);
1326 }
1327 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1328
1329 bool __nf_ct_kill_acct(struct nf_conn *ct,
1330                        enum ip_conntrack_info ctinfo,
1331                        const struct sk_buff *skb,
1332                        int do_acct)
1333 {
1334         if (do_acct)
1335                 nf_ct_acct_update(ct, ctinfo, skb->len);
1336
1337         if (del_timer(&ct->timeout)) {
1338                 ct->timeout.function((unsigned long)ct);
1339                 return true;
1340         }
1341         return false;
1342 }
1343 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
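/* Editor's note (sketch): most callers use the short wrappers from
 * nf_conntrack.h rather than __nf_ct_kill_acct() directly; they are
 * roughly equivalent to the fragment below (reproduced from memory,
 * see the header for the authoritative definitions).
 */
#if 0
static inline bool nf_ct_kill_acct(struct nf_conn *ct,
                                   enum ip_conntrack_info ctinfo,
                                   const struct sk_buff *skb)
{
        return __nf_ct_kill_acct(ct, ctinfo, skb, 1);
}

static inline bool nf_ct_kill(struct nf_conn *ct)
{
        return __nf_ct_kill_acct(ct, 0, NULL, 0);
}
#endif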
1344
1345 #ifdef CONFIG_NF_CONNTRACK_ZONES
1346 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1347         .len    = sizeof(struct nf_conntrack_zone),
1348         .align  = __alignof__(struct nf_conntrack_zone),
1349         .id     = NF_CT_EXT_ZONE,
1350 };
1351 #endif
1352
1353 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1354
1355 #include <linux/netfilter/nfnetlink.h>
1356 #include <linux/netfilter/nfnetlink_conntrack.h>
1357 #include <linux/mutex.h>
1358
1359 /* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
1360  * in ip_conntrack_core, since we don't want the protocols to autoload
1361  * or depend on ctnetlink */
1362 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1363                                const struct nf_conntrack_tuple *tuple)
1364 {
1365         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1366             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1367                 goto nla_put_failure;
1368         return 0;
1369
1370 nla_put_failure:
1371         return -1;
1372 }
1373 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1374
1375 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1376         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1377         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1378 };
1379 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1380
1381 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1382                                struct nf_conntrack_tuple *t)
1383 {
1384         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1385                 return -EINVAL;
1386
1387         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1388         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1389
1390         return 0;
1391 }
1392 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1393
1394 int nf_ct_port_nlattr_tuple_size(void)
1395 {
1396         return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1397 }
1398 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
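/* Editor's example (sketch): an l4 tracker for a port-based protocol wires
 * the generic helpers above into its nf_conntrack_l4proto ops, much like
 * the TCP/UDP trackers do.  example_l4proto is a hypothetical, partial
 * initializer shown for illustration only.
 */
#if 0
static struct nf_conntrack_l4proto example_l4proto __read_mostly = {
        .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
};
#endif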
1399 #endif
1400
1401 /* Used by ipt_REJECT and ip6t_REJECT. */
1402 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1403 {
1404         struct nf_conn *ct;
1405         enum ip_conntrack_info ctinfo;
1406
1407         /* This ICMP is in reverse direction to the packet which caused it */
1408         ct = nf_ct_get(skb, &ctinfo);
1409         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1410                 ctinfo = IP_CT_RELATED_REPLY;
1411         else
1412                 ctinfo = IP_CT_RELATED;
1413
1414         /* Attach to new skbuff, and increment count */
1415         nskb->nfct = &ct->ct_general;
1416         nskb->nfctinfo = ctinfo;
1417         nf_conntrack_get(nskb->nfct);
1418 }
1419
1420 /* Bring out ya dead! */
1421 static struct nf_conn *
1422 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1423                 void *data, unsigned int *bucket)
1424 {
1425         struct nf_conntrack_tuple_hash *h;
1426         struct nf_conn *ct;
1427         struct hlist_nulls_node *n;
1428         int cpu;
1429         spinlock_t *lockp;
1430
1431         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1432                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1433                 local_bh_disable();
1434                 nf_conntrack_lock(lockp);
1435                 if (*bucket < nf_conntrack_htable_size) {
1436                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1437                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1438                                         continue;
1439                                 ct = nf_ct_tuplehash_to_ctrack(h);
1440                                 if (net_eq(nf_ct_net(ct), net) &&
1441                                     iter(ct, data))
1442                                         goto found;
1443                         }
1444                 }
1445                 spin_unlock(lockp);
1446                 local_bh_enable();
1447                 cond_resched();
1448         }
1449
1450         for_each_possible_cpu(cpu) {
1451                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1452
1453                 spin_lock_bh(&pcpu->lock);
1454                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1455                         ct = nf_ct_tuplehash_to_ctrack(h);
1456                         if (iter(ct, data))
1457                                 set_bit(IPS_DYING_BIT, &ct->status);
1458                 }
1459                 spin_unlock_bh(&pcpu->lock);
1460                 cond_resched();
1461         }
1462         return NULL;
1463 found:
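        /* Pin the entry before we drop the bucket lock. */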
1464         atomic_inc(&ct->ct_general.use);
1465         spin_unlock(lockp);
1466         local_bh_enable();
1467         return ct;
1468 }
1469
1470 void nf_ct_iterate_cleanup(struct net *net,
1471                            int (*iter)(struct nf_conn *i, void *data),
1472                            void *data, u32 portid, int report)
1473 {
1474         struct nf_conn *ct;
1475         unsigned int bucket = 0;
1476
1477         might_sleep();
1478
1479         if (atomic_read(&net->ct.count) == 0)
1480                 return;
1481
1482         while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1483                 /* Time to push up daisies... */
1484                 if (del_timer(&ct->timeout))
1485                         nf_ct_delete(ct, portid, report);
1486
1487                 /* ... else the timer will get him soon. */
1488
1489                 nf_ct_put(ct);
1490                 cond_resched();
1491         }
1492 }
1493 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
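/*
 * Editor's sketch (hypothetical helpers, not part of this file): the expected
 * shape of an iter callback.  Returning non-zero makes get_next_corpse() hand
 * the entry back for deletion (or mark it dying while still unconfirmed).
 */
static int example_kill_ipv6(struct nf_conn *ct, void *data)
{
	return nf_ct_l3num(ct) == NFPROTO_IPV6;
}

static void __maybe_unused example_flush_ipv6(struct net *net)
{
	nf_ct_iterate_cleanup(net, example_kill_ipv6, NULL, 0, 0);
}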
1494
1495 static int kill_all(struct nf_conn *i, void *data)
1496 {
1497         return 1;
1498 }
1499
1500 void nf_ct_free_hashtable(void *hash, unsigned int size)
1501 {
1502         if (is_vmalloc_addr(hash))
1503                 vfree(hash);
1504         else
1505                 free_pages((unsigned long)hash,
1506                            get_order(sizeof(struct hlist_head) * size));
1507 }
1508 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1509
1510 static int untrack_refs(void)
1511 {
1512         int cnt = 0, cpu;
1513
1514         for_each_possible_cpu(cpu) {
1515                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1516
1517                 cnt += atomic_read(&ct->ct_general.use) - 1;
1518         }
1519         return cnt;
1520 }
1521
1522 void nf_conntrack_cleanup_start(void)
1523 {
1524         RCU_INIT_POINTER(ip_ct_attach, NULL);
1525 }
1526
1527 void nf_conntrack_cleanup_end(void)
1528 {
1529         RCU_INIT_POINTER(nf_ct_destroy, NULL);
1530         while (untrack_refs() > 0)
1531                 schedule();
1532
1533         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1534
1535 #ifdef CONFIG_NF_CONNTRACK_ZONES
1536         nf_ct_extend_unregister(&nf_ct_zone_extend);
1537 #endif
1538         nf_conntrack_proto_fini();
1539         nf_conntrack_seqadj_fini();
1540         nf_conntrack_labels_fini();
1541         nf_conntrack_helper_fini();
1542         nf_conntrack_timeout_fini();
1543         nf_conntrack_ecache_fini();
1544         nf_conntrack_tstamp_fini();
1545         nf_conntrack_acct_fini();
1546         nf_conntrack_expect_fini();
1547
1548         kmem_cache_destroy(nf_conntrack_cachep);
1549 }
1550
1551 /*
1552  * Mishearing the voices in his head, our hero wonders how he's
1553  * supposed to kill the mall.
1554  */
1555 void nf_conntrack_cleanup_net(struct net *net)
1556 {
1557         LIST_HEAD(single);
1558
1559         list_add(&net->exit_list, &single);
1560         nf_conntrack_cleanup_net_list(&single);
1561 }
1562
1563 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1564 {
1565         int busy;
1566         struct net *net;
1567
1568         /*
1569          * This makes sure all current packets have passed through
1570          * the netfilter framework.  Roll on, two-stage module
1571          * delete...
1572          */
1573         synchronize_net();
1574 i_see_dead_people:
1575         busy = 0;
1576         list_for_each_entry(net, net_exit_list, exit_list) {
1577                 nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1578                 if (atomic_read(&net->ct.count) != 0)
1579                         busy = 1;
1580         }
1581         if (busy) {
1582                 schedule();
1583                 goto i_see_dead_people;
1584         }
1585
1586         list_for_each_entry(net, net_exit_list, exit_list) {
1587                 nf_conntrack_proto_pernet_fini(net);
1588                 nf_conntrack_helper_pernet_fini(net);
1589                 nf_conntrack_ecache_pernet_fini(net);
1590                 nf_conntrack_tstamp_pernet_fini(net);
1591                 nf_conntrack_acct_pernet_fini(net);
1592                 nf_conntrack_expect_pernet_fini(net);
1593                 free_percpu(net->ct.stat);
1594                 free_percpu(net->ct.pcpu_lists);
1595         }
1596 }
1597
1598 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1599 {
1600         struct hlist_nulls_head *hash;
1601         unsigned int nr_slots, i;
1602         size_t sz;
1603
1604         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1605                 return NULL;
1606
1607         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1608         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1609
1610         if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1611                 return NULL;
1612
1613         sz = nr_slots * sizeof(struct hlist_nulls_head);
1614         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1615                                         get_order(sz));
1616         if (!hash)
1617                 hash = vzalloc(sz);
1618
1619         if (hash && nulls)
1620                 for (i = 0; i < nr_slots; i++)
1621                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1622
1623         return hash;
1624 }
1625 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
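/*
 * Editor's sketch (hypothetical helper, not part of this file): other conntrack
 * components allocate their own tables through this interface.  With nulls == 0
 * the slots are plain, zeroed hlist heads; *sizep is rounded up so the table
 * fills whole pages, and nf_ct_free_hashtable() must be given that rounded size.
 */
static void __maybe_unused example_private_hash(void)
{
	unsigned int hsize = 1024;
	struct hlist_head *hash;

	hash = nf_ct_alloc_hashtable(&hsize, 0);
	if (!hash)
		return;
	/* ... populate and use the table ... */
	nf_ct_free_hashtable(hash, hsize);
}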
1626
1627 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1628 {
1629         int i, bucket, rc;
1630         unsigned int hashsize, old_size;
1631         struct hlist_nulls_head *hash, *old_hash;
1632         struct nf_conntrack_tuple_hash *h;
1633         struct nf_conn *ct;
1634
1635         if (current->nsproxy->net_ns != &init_net)
1636                 return -EOPNOTSUPP;
1637
1638         /* On boot, we can set this without any fancy locking. */
1639         if (!nf_conntrack_htable_size)
1640                 return param_set_uint(val, kp);
1641
1642         rc = kstrtouint(val, 0, &hashsize);
1643         if (rc)
1644                 return rc;
1645         if (!hashsize)
1646                 return -EINVAL;
1647
1648         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1649         if (!hash)
1650                 return -ENOMEM;
1651
1652         local_bh_disable();
1653         nf_conntrack_all_lock();
1654         write_seqcount_begin(&nf_conntrack_generation);
1655
1656         /* Lookups in the old hash might happen in parallel, which means we
1657          * might get false negatives during connection lookup. New connections
1658          * created because of a false negative won't make it into the hash,
1659          * though, since that requires taking the locks.
1660          */
1661
1662         for (i = 0; i < nf_conntrack_htable_size; i++) {
1663                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
1664                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
1665                                               struct nf_conntrack_tuple_hash, hnnode);
1666                         ct = nf_ct_tuplehash_to_ctrack(h);
1667                         hlist_nulls_del_rcu(&h->hnnode);
1668                         bucket = __hash_conntrack(nf_ct_net(ct),
1669                                                   &h->tuple, hashsize);
1670                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1671                 }
1672         }
1673         old_size = nf_conntrack_htable_size;
1674         old_hash = nf_conntrack_hash;
1675
1676         nf_conntrack_hash = hash;
1677         nf_conntrack_htable_size = hashsize;
1678
1679         write_seqcount_end(&nf_conntrack_generation);
1680         nf_conntrack_all_unlock();
1681         local_bh_enable();
1682
1683         synchronize_net();
1684         nf_ct_free_hashtable(old_hash, old_size);
1685         return 0;
1686 }
1687 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1688
1689 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1690                   &nf_conntrack_htable_size, 0600);
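/*
 * Editor's note: with conntrack built as the nf_conntrack module, the handler
 * above allows resizing the hash at runtime from the initial namespace, e.g.:
 *
 *	echo 262144 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * The old table is only freed after synchronize_net(), so concurrent lookups
 * see either the old or the new generation, never freed memory.
 */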
1691
1692 void nf_ct_untracked_status_or(unsigned long bits)
1693 {
1694         int cpu;
1695
1696         for_each_possible_cpu(cpu)
1697                 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1698 }
1699 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
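/*
 * Editor's sketch (hypothetical helper, not part of this file): roughly what
 * the NAT core does at init time -- marking the per-cpu untracked entries as
 * already NAT-handled so the NAT hooks skip them.
 */
static void __maybe_unused example_skip_untracked_in_nat(void)
{
	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
}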
1700
1701 int nf_conntrack_init_start(void)
1702 {
1703         int max_factor = 8;
1704         int ret = -ENOMEM;
1705         int i, cpu;
1706
1707         seqcount_init(&nf_conntrack_generation);
1708
1709         for (i = 0; i < CONNTRACK_LOCKS; i++)
1710                 spin_lock_init(&nf_conntrack_locks[i]);
1711
1712         if (!nf_conntrack_htable_size) {
1713                 /* Idea from tcp.c: use 1/16384 of memory.
1714                  * On i386: 32MB machine has 512 buckets.
1715                  * >= 1GB machines have 16384 buckets.
1716                  * >= 4GB machines have 65536 buckets.
1717                  */
1718                 nf_conntrack_htable_size
1719                         = (((totalram_pages << PAGE_SHIFT) / 16384)
1720                            / sizeof(struct hlist_head));
1721                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1722                         nf_conntrack_htable_size = 65536;
1723                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1724                         nf_conntrack_htable_size = 16384;
1725                 if (nf_conntrack_htable_size < 32)
1726                         nf_conntrack_htable_size = 32;
1727
1728                 /* Use a max factor of four by default to get the same maximum
1729                  * as with the old struct list_heads. When a table size is given
1730                  * we use the old value of 8 to avoid reducing the maximum
1731                  * number of entries. */
1732                 max_factor = 4;
1733         }
1734
1735         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
1736         if (!nf_conntrack_hash)
1737                 return -ENOMEM;
1738
1739         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1740
1741         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1742                                                 sizeof(struct nf_conn), 0,
1743                                                 SLAB_DESTROY_BY_RCU, NULL);
1744         if (!nf_conntrack_cachep)
1745                 goto err_cachep;
1746
1747         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1748                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1749                nf_conntrack_max);
1750
1751         ret = nf_conntrack_expect_init();
1752         if (ret < 0)
1753                 goto err_expect;
1754
1755         ret = nf_conntrack_acct_init();
1756         if (ret < 0)
1757                 goto err_acct;
1758
1759         ret = nf_conntrack_tstamp_init();
1760         if (ret < 0)
1761                 goto err_tstamp;
1762
1763         ret = nf_conntrack_ecache_init();
1764         if (ret < 0)
1765                 goto err_ecache;
1766
1767         ret = nf_conntrack_timeout_init();
1768         if (ret < 0)
1769                 goto err_timeout;
1770
1771         ret = nf_conntrack_helper_init();
1772         if (ret < 0)
1773                 goto err_helper;
1774
1775         ret = nf_conntrack_labels_init();
1776         if (ret < 0)
1777                 goto err_labels;
1778
1779         ret = nf_conntrack_seqadj_init();
1780         if (ret < 0)
1781                 goto err_seqadj;
1782
1783 #ifdef CONFIG_NF_CONNTRACK_ZONES
1784         ret = nf_ct_extend_register(&nf_ct_zone_extend);
1785         if (ret < 0)
1786                 goto err_extend;
1787 #endif
1788         ret = nf_conntrack_proto_init();
1789         if (ret < 0)
1790                 goto err_proto;
1791
1792         /* Set up fake conntrack: to never be deleted, not in any hashes */
1793         for_each_possible_cpu(cpu) {
1794                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1795                 write_pnet(&ct->ct_net, &init_net);
1796                 atomic_set(&ct->ct_general.use, 1);
1797         }
1798         /*  - and make it look like a confirmed connection */
1799         nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1800         return 0;
1801
1802 err_proto:
1803 #ifdef CONFIG_NF_CONNTRACK_ZONES
1804         nf_ct_extend_unregister(&nf_ct_zone_extend);
1805 err_extend:
1806 #endif
1807         nf_conntrack_seqadj_fini();
1808 err_seqadj:
1809         nf_conntrack_labels_fini();
1810 err_labels:
1811         nf_conntrack_helper_fini();
1812 err_helper:
1813         nf_conntrack_timeout_fini();
1814 err_timeout:
1815         nf_conntrack_ecache_fini();
1816 err_ecache:
1817         nf_conntrack_tstamp_fini();
1818 err_tstamp:
1819         nf_conntrack_acct_fini();
1820 err_acct:
1821         nf_conntrack_expect_fini();
1822 err_expect:
1823         kmem_cache_destroy(nf_conntrack_cachep);
1824 err_cachep:
1825         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1826         return ret;
1827 }
1828
1829 void nf_conntrack_init_end(void)
1830 {
1831         /* For use by REJECT target */
1832         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1833         RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1834 }
1835
1836 /*
1837  * We need to use special "null" values, not used in the hash table
1838  */
1839 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1840 #define DYING_NULLS_VAL         ((1<<30)+1)
1841 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
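/*
 * Editor's sketch (hypothetical helper, not part of this file): the value is
 * stored in the list's nulls marker, so a lockless walker can check which
 * list (or, for the main table, which bucket) it actually finished on.
 */
static bool __maybe_unused example_ended_on_dying_list(const struct hlist_nulls_node *n)
{
	return is_a_nulls(n) && get_nulls_value(n) == DYING_NULLS_VAL;
}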
1842
1843 int nf_conntrack_init_net(struct net *net)
1844 {
1845         int ret = -ENOMEM;
1846         int cpu;
1847
1848         atomic_set(&net->ct.count, 0);
1849
1850         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1851         if (!net->ct.pcpu_lists)
1852                 goto err_stat;
1853
1854         for_each_possible_cpu(cpu) {
1855                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1856
1857                 spin_lock_init(&pcpu->lock);
1858                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1859                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1860         }
1861
1862         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1863         if (!net->ct.stat)
1864                 goto err_pcpu_lists;
1865
1866         ret = nf_conntrack_expect_pernet_init(net);
1867         if (ret < 0)
1868                 goto err_expect;
1869         ret = nf_conntrack_acct_pernet_init(net);
1870         if (ret < 0)
1871                 goto err_acct;
1872         ret = nf_conntrack_tstamp_pernet_init(net);
1873         if (ret < 0)
1874                 goto err_tstamp;
1875         ret = nf_conntrack_ecache_pernet_init(net);
1876         if (ret < 0)
1877                 goto err_ecache;
1878         ret = nf_conntrack_helper_pernet_init(net);
1879         if (ret < 0)
1880                 goto err_helper;
1881         ret = nf_conntrack_proto_pernet_init(net);
1882         if (ret < 0)
1883                 goto err_proto;
1884         return 0;
1885
1886 err_proto:
1887         nf_conntrack_helper_pernet_fini(net);
1888 err_helper:
1889         nf_conntrack_ecache_pernet_fini(net);
1890 err_ecache:
1891         nf_conntrack_tstamp_pernet_fini(net);
1892 err_tstamp:
1893         nf_conntrack_acct_pernet_fini(net);
1894 err_acct:
1895         nf_conntrack_expect_pernet_fini(net);
1896 err_expect:
1897         free_percpu(net->ct.stat);
1898 err_pcpu_lists:
1899         free_percpu(net->ct.pcpu_lists);
1900 err_stat:
1901         return ret;
1902 }
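/*
 * Editor's sketch (illustrative only): nf_conntrack_init_net() and
 * nf_conntrack_cleanup_net_list() are hooked into the network-namespace
 * lifecycle by nf_conntrack_standalone.c; stripped of its /proc and sysctl
 * setup, the wiring has roughly this shape.
 */
static struct pernet_operations example_ct_net_ops __maybe_unused = {
	.init		= nf_conntrack_init_net,
	.exit_batch	= nf_conntrack_cleanup_net_list,
};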