1 /* Connection state tracking for netfilter. This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    connection tracking module.
4 */
5 /* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/types.h>
18 #include <linux/netfilter.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/skbuff.h>
22 #include <linux/proc_fs.h>
23 #include <linux/vmalloc.h>
24 #include <linux/stddef.h>
25 #include <linux/slab.h>
26 #include <linux/random.h>
27 #include <linux/jhash.h>
28 #include <linux/err.h>
29 #include <linux/percpu.h>
30 #include <linux/moduleparam.h>
31 #include <linux/notifier.h>
32 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/socket.h>
36 #include <linux/nsproxy.h>
37 #include <linux/rculist_nulls.h>
39 #include <net/netfilter/nf_conntrack.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_expect.h>
43 #include <net/netfilter/nf_conntrack_helper.h>
44 #include <net/netfilter/nf_conntrack_seqadj.h>
45 #include <net/netfilter/nf_conntrack_core.h>
46 #include <net/netfilter/nf_conntrack_extend.h>
47 #include <net/netfilter/nf_conntrack_acct.h>
48 #include <net/netfilter/nf_conntrack_ecache.h>
49 #include <net/netfilter/nf_conntrack_zones.h>
50 #include <net/netfilter/nf_conntrack_timestamp.h>
51 #include <net/netfilter/nf_conntrack_timeout.h>
52 #include <net/netfilter/nf_conntrack_labels.h>
53 #include <net/netfilter/nf_conntrack_synproxy.h>
54 #include <net/netfilter/nf_nat.h>
55 #include <net/netfilter/nf_nat_core.h>
56 #include <net/netfilter/nf_nat_helper.h>
58 #define NF_CONNTRACK_VERSION "0.5.0"
60 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
61 enum nf_nat_manip_type manip,
62 const struct nlattr *attr) __read_mostly;
63 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
65 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
66 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
68 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
69 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
71 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
72 static __read_mostly seqcount_t nf_conntrack_generation;
73 static __read_mostly bool nf_conntrack_locks_all;
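/* Note: two-level locking scheme. Individual hash buckets are covered by
 * the nf_conntrack_locks[] array above, while nf_conntrack_locks_all
 * (guarded by nf_conntrack_locks_all_lock) lets a single caller exclude
 * every per-bucket locker at once, e.g. while resizing the table.
 */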
75 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
78 while (unlikely(nf_conntrack_locks_all)) {
80 spin_unlock_wait(&nf_conntrack_locks_all_lock);
84 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
86 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
88 h1 %= CONNTRACK_LOCKS;
89 h2 %= CONNTRACK_LOCKS;
90 spin_unlock(&nf_conntrack_locks[h1]);
92 spin_unlock(&nf_conntrack_locks[h2]);
95 /* return true if we need to recompute hashes (in case hash table was resized) */
96 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
97 unsigned int h2, unsigned int sequence)
99 h1 %= CONNTRACK_LOCKS;
100 h2 %= CONNTRACK_LOCKS;
102 nf_conntrack_lock(&nf_conntrack_locks[h1]);
104 spin_lock_nested(&nf_conntrack_locks[h2],
105 SINGLE_DEPTH_NESTING);
107 nf_conntrack_lock(&nf_conntrack_locks[h2]);
108 spin_lock_nested(&nf_conntrack_locks[h1],
109 SINGLE_DEPTH_NESTING);
111 if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
112 nf_conntrack_double_unlock(h1, h2);
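/* Note: nf_conntrack_all_lock() sets nf_conntrack_locks_all and then waits
 * for each per-bucket lock to drain, so the caller owns all buckets until
 * nf_conntrack_all_unlock(); nf_conntrack_set_hashsize() relies on this.
 */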
118 static void nf_conntrack_all_lock(void)
122 spin_lock(&nf_conntrack_locks_all_lock);
123 nf_conntrack_locks_all = true;
125 for (i = 0; i < CONNTRACK_LOCKS; i++) {
126 spin_unlock_wait(&nf_conntrack_locks[i]);
130 static void nf_conntrack_all_unlock(void)
132 nf_conntrack_locks_all = false;
133 spin_unlock(&nf_conntrack_locks_all_lock);
136 unsigned int nf_conntrack_htable_size __read_mostly;
137 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
139 unsigned int nf_conntrack_max __read_mostly;
140 EXPORT_SYMBOL_GPL(nf_conntrack_max);
142 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
143 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
145 static unsigned int nf_conntrack_hash_rnd __read_mostly;
147 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
151 get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
153 /* The direction must be ignored, so we hash everything up to the
154 * destination ports (which is a multiple of 4) and treat the last
155 * three bytes manually.
157 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
158 return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
159 (((__force __u16)tuple->dst.u.all << 16) |
160 tuple->dst.protonum));
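/* Note: reciprocal_scale() maps the 32-bit hash onto [0, size) using a
 * multiply-and-shift instead of an expensive modulo.
 */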
163 static u32 __hash_bucket(u32 hash, unsigned int size)
165 return reciprocal_scale(hash, size);
168 static u32 hash_bucket(u32 hash, const struct net *net)
170 return __hash_bucket(hash, net->ct.htable_size);
173 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
176 return __hash_bucket(hash_conntrack_raw(tuple), size);
179 static inline u_int32_t hash_conntrack(const struct net *net,
180 const struct nf_conntrack_tuple *tuple)
182 return __hash_conntrack(tuple, net->ct.htable_size);
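/* Note: tuple extraction is delegated to the registered protocol handlers:
 * l3proto->pkt_to_tuple() fills in the network-layer addresses and
 * l4proto->pkt_to_tuple() the transport-specific part (e.g. ports), keeping
 * this core file protocol independent.
 */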
186 nf_ct_get_tuple(const struct sk_buff *skb,
188 unsigned int dataoff,
192 struct nf_conntrack_tuple *tuple,
193 const struct nf_conntrack_l3proto *l3proto,
194 const struct nf_conntrack_l4proto *l4proto)
196 memset(tuple, 0, sizeof(*tuple));
198 tuple->src.l3num = l3num;
199 if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
202 tuple->dst.protonum = protonum;
203 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
205 return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
207 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
209 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
211 struct net *net, struct nf_conntrack_tuple *tuple)
213 struct nf_conntrack_l3proto *l3proto;
214 struct nf_conntrack_l4proto *l4proto;
215 unsigned int protoff;
221 l3proto = __nf_ct_l3proto_find(l3num);
222 ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
223 if (ret != NF_ACCEPT) {
228 l4proto = __nf_ct_l4proto_find(l3num, protonum);
230 ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
236 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
239 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
240 const struct nf_conntrack_tuple *orig,
241 const struct nf_conntrack_l3proto *l3proto,
242 const struct nf_conntrack_l4proto *l4proto)
244 memset(inverse, 0, sizeof(*inverse));
246 inverse->src.l3num = orig->src.l3num;
247 if (l3proto->invert_tuple(inverse, orig) == 0)
250 inverse->dst.dir = !orig->dst.dir;
252 inverse->dst.protonum = orig->dst.protonum;
253 return l4proto->invert_tuple(inverse, orig);
255 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
258 clean_from_lists(struct nf_conn *ct)
260 pr_debug("clean_from_lists(%p)\n", ct);
261 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
262 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
264 /* Destroy all pending expectations */
265 nf_ct_remove_expectations(ct);
268 /* must be called with local_bh_disable */
269 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
271 struct ct_pcpu *pcpu;
273 /* add this conntrack to the (per cpu) dying list */
274 ct->cpu = smp_processor_id();
275 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
277 spin_lock(&pcpu->lock);
278 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
280 spin_unlock(&pcpu->lock);
283 /* must be called with local_bh_disable */
284 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
286 struct ct_pcpu *pcpu;
288 /* add this conntrack to the (per cpu) unconfirmed list */
289 ct->cpu = smp_processor_id();
290 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
292 spin_lock(&pcpu->lock);
293 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
295 spin_unlock(&pcpu->lock);
298 /* must be called with local_bh_disable */
299 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
301 struct ct_pcpu *pcpu;
303 /* We overload the first tuple to link into the unconfirmed or dying list. */
304 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
306 spin_lock(&pcpu->lock);
307 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
308 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
309 spin_unlock(&pcpu->lock);
312 /* Released via destroy_conntrack() */
313 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
314 const struct nf_conntrack_zone *zone,
317 struct nf_conn *tmpl;
319 tmpl = kzalloc(sizeof(*tmpl), flags);
323 tmpl->status = IPS_TEMPLATE;
324 write_pnet(&tmpl->ct_net, net);
326 if (nf_ct_zone_add(tmpl, flags, zone) < 0)
329 atomic_set(&tmpl->ct_general.use, 0);
336 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
338 void nf_ct_tmpl_free(struct nf_conn *tmpl)
340 nf_ct_ext_destroy(tmpl);
341 nf_ct_ext_free(tmpl);
344 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
347 destroy_conntrack(struct nf_conntrack *nfct)
349 struct nf_conn *ct = (struct nf_conn *)nfct;
350 struct net *net = nf_ct_net(ct);
351 struct nf_conntrack_l4proto *l4proto;
353 pr_debug("destroy_conntrack(%p)\n", ct);
354 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
355 NF_CT_ASSERT(!timer_pending(&ct->timeout));
357 if (unlikely(nf_ct_is_template(ct))) {
362 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
363 if (l4proto && l4proto->destroy)
364 l4proto->destroy(ct);
369 /* Expectations will have been removed in clean_from_lists,
370 * except TFTP can create an expectation on the first packet,
371 * before connection is in the list, so we need to clean here,
372 * too.
373 */
374 nf_ct_remove_expectations(ct);
376 nf_ct_del_from_dying_or_unconfirmed_list(ct);
378 NF_CT_STAT_INC(net, delete);
382 nf_ct_put(ct->master);
384 pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
385 nf_conntrack_free(ct);
388 static void nf_ct_delete_from_lists(struct nf_conn *ct)
390 struct net *net = nf_ct_net(ct);
391 unsigned int hash, reply_hash;
392 unsigned int sequence;
394 nf_ct_helper_destroy(ct);
398 sequence = read_seqcount_begin(&nf_conntrack_generation);
399 hash = hash_conntrack(net,
400 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
401 reply_hash = hash_conntrack(net,
402 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
403 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
405 clean_from_lists(ct);
406 nf_conntrack_double_unlock(hash, reply_hash);
408 nf_ct_add_to_dying_list(ct);
410 NF_CT_STAT_INC(net, delete_list);
414 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
416 struct nf_conn_tstamp *tstamp;
418 tstamp = nf_conn_tstamp_find(ct);
419 if (tstamp && tstamp->stop == 0)
420 tstamp->stop = ktime_get_real_ns();
422 if (nf_ct_is_dying(ct))
425 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
426 portid, report) < 0) {
427 /* destroy event was not delivered */
428 nf_ct_delete_from_lists(ct);
429 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
433 nf_conntrack_ecache_work(nf_ct_net(ct));
434 set_bit(IPS_DYING_BIT, &ct->status);
436 nf_ct_delete_from_lists(ct);
440 EXPORT_SYMBOL_GPL(nf_ct_delete);
442 static void death_by_timeout(unsigned long ul_conntrack)
444 nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
448 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
449 const struct nf_conntrack_tuple *tuple,
450 const struct nf_conntrack_zone *zone,
451 const struct net *net)
453 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
455 /* A conntrack can be recreated with the equal tuple,
456 * so we need to check that the conntrack is confirmed
457 * before matching. */
458 return nf_ct_tuple_equal(tuple, &h->tuple) &&
459 nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
460 nf_ct_is_confirmed(ct) &&
461 net_eq(net, nf_ct_net(ct));
466 * - Caller must take a reference on returned object
467 * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
469 static struct nf_conntrack_tuple_hash *
470 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
471 const struct nf_conntrack_tuple *tuple, u32 hash)
473 struct nf_conntrack_tuple_hash *h;
474 struct hlist_nulls_head *ct_hash;
475 struct hlist_nulls_node *n;
476 unsigned int bucket, sequence;
480 sequence = read_seqcount_begin(&nf_conntrack_generation);
481 bucket = hash_bucket(hash, net);
482 ct_hash = net->ct.hash;
483 } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
485 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
486 if (nf_ct_key_equal(h, tuple, zone, net)) {
487 NF_CT_STAT_INC_ATOMIC(net, found);
490 NF_CT_STAT_INC_ATOMIC(net, searched);
493 * if the nulls value we got at the end of this lookup is
494 * not the expected one, we must restart lookup.
495 * We probably met an item that was moved to another chain.
497 if (get_nulls_value(n) != bucket) {
498 NF_CT_STAT_INC_ATOMIC(net, search_restart);
505 /* Find a connection corresponding to a tuple. */
506 static struct nf_conntrack_tuple_hash *
507 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
508 const struct nf_conntrack_tuple *tuple, u32 hash)
510 struct nf_conntrack_tuple_hash *h;
515 h = ____nf_conntrack_find(net, zone, tuple, hash);
517 ct = nf_ct_tuplehash_to_ctrack(h);
518 if (unlikely(nf_ct_is_dying(ct) ||
519 !atomic_inc_not_zero(&ct->ct_general.use)))
522 if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
533 struct nf_conntrack_tuple_hash *
534 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
535 const struct nf_conntrack_tuple *tuple)
537 return __nf_conntrack_find_get(net, zone, tuple,
538 hash_conntrack_raw(tuple));
540 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
542 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
544 unsigned int reply_hash)
546 struct net *net = nf_ct_net(ct);
548 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
549 &net->ct.hash[hash]);
550 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
551 &net->ct.hash[reply_hash]);
555 nf_conntrack_hash_check_insert(struct nf_conn *ct)
557 const struct nf_conntrack_zone *zone;
558 struct net *net = nf_ct_net(ct);
559 unsigned int hash, reply_hash;
560 struct nf_conntrack_tuple_hash *h;
561 struct hlist_nulls_node *n;
562 unsigned int sequence;
564 zone = nf_ct_zone(ct);
568 sequence = read_seqcount_begin(&nf_conntrack_generation);
569 hash = hash_conntrack(net,
570 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
571 reply_hash = hash_conntrack(net,
572 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
573 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
575 /* See if there's one in the list already, including reverse */
576 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
577 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
581 hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
582 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
586 add_timer(&ct->timeout);
588 /* The caller holds a reference to this object */
589 atomic_set(&ct->ct_general.use, 2);
590 __nf_conntrack_hash_insert(ct, hash, reply_hash);
591 nf_conntrack_double_unlock(hash, reply_hash);
592 NF_CT_STAT_INC(net, insert);
597 nf_conntrack_double_unlock(hash, reply_hash);
598 NF_CT_STAT_INC(net, insert_failed);
602 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
604 /* Confirm a connection given skb; places it in hash table */
606 __nf_conntrack_confirm(struct sk_buff *skb)
608 const struct nf_conntrack_zone *zone;
609 unsigned int hash, reply_hash;
610 struct nf_conntrack_tuple_hash *h;
612 struct nf_conn_help *help;
613 struct nf_conn_tstamp *tstamp;
614 struct hlist_nulls_node *n;
615 enum ip_conntrack_info ctinfo;
617 unsigned int sequence;
619 ct = nf_ct_get(skb, &ctinfo);
622 /* ipt_REJECT uses nf_conntrack_attach to attach related
623 ICMP/TCP RST packets in the other direction. The actual packet
624 that created the connection will be IP_CT_NEW or, for an
625 expected connection, IP_CT_RELATED. */
626 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
629 zone = nf_ct_zone(ct);
633 sequence = read_seqcount_begin(&nf_conntrack_generation);
634 /* reuse the hash saved before */
635 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
636 hash = hash_bucket(hash, net);
637 reply_hash = hash_conntrack(net,
638 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
640 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
642 /* We're not in hash table, and we refuse to set up related
643 * connections for unconfirmed conns. But packet copies and
644 * REJECT will give spurious warnings here.
646 /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
648 /* No external references means no one else could have
649 * confirmed us.
650 */
651 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
652 pr_debug("Confirming conntrack %p\n", ct);
653 /* We have to check the DYING flag after unlink to prevent
654 * a race against nf_ct_get_next_corpse() possibly called from
655 * user context, else we insert an already 'dead' hash, blocking
656 * further use of that particular connection -JM.
658 nf_ct_del_from_dying_or_unconfirmed_list(ct);
660 if (unlikely(nf_ct_is_dying(ct)))
663 /* See if there's one in the list already, including reverse:
664 NAT could have grabbed it without realizing, since we're
665 not in the hash. If there is, we lost the race. */
666 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
667 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
671 hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
672 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
676 /* Timer relative to confirmation time, not original
677 setting time, otherwise we'd get timer wrap in
678 weird delay cases. */
679 ct->timeout.expires += jiffies;
680 add_timer(&ct->timeout);
681 atomic_inc(&ct->ct_general.use);
682 ct->status |= IPS_CONFIRMED;
684 /* set conntrack timestamp, if enabled. */
685 tstamp = nf_conn_tstamp_find(ct);
687 if (skb->tstamp.tv64 == 0)
688 __net_timestamp(skb);
690 tstamp->start = ktime_to_ns(skb->tstamp);
692 /* Since the lookup is lockless, hash insertion must be done after
693 * starting the timer and setting the CONFIRMED bit. The RCU barriers
694 * guarantee that no other CPU can find the conntrack before the above
695 * stores are visible.
697 __nf_conntrack_hash_insert(ct, hash, reply_hash);
698 nf_conntrack_double_unlock(hash, reply_hash);
699 NF_CT_STAT_INC(net, insert);
702 help = nfct_help(ct);
703 if (help && help->helper)
704 nf_conntrack_event_cache(IPCT_HELPER, ct);
706 nf_conntrack_event_cache(master_ct(ct) ?
707 IPCT_RELATED : IPCT_NEW, ct);
711 nf_ct_add_to_dying_list(ct);
712 nf_conntrack_double_unlock(hash, reply_hash);
713 NF_CT_STAT_INC(net, insert_failed);
717 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
719 /* Returns true if a connection corresponds to the tuple (required
720 for NAT). */
722 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
723 const struct nf_conn *ignored_conntrack)
725 struct net *net = nf_ct_net(ignored_conntrack);
726 const struct nf_conntrack_zone *zone;
727 struct nf_conntrack_tuple_hash *h;
728 struct hlist_nulls_head *ct_hash;
729 unsigned int hash, sequence;
730 struct hlist_nulls_node *n;
733 zone = nf_ct_zone(ignored_conntrack);
737 sequence = read_seqcount_begin(&nf_conntrack_generation);
738 hash = hash_conntrack(net, tuple);
739 ct_hash = net->ct.hash;
740 } while (read_seqcount_retry(&nf_conntrack_generation, sequence));
742 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
743 ct = nf_ct_tuplehash_to_ctrack(h);
744 if (ct != ignored_conntrack &&
745 nf_ct_key_equal(h, tuple, zone, net)) {
746 NF_CT_STAT_INC_ATOMIC(net, found);
750 NF_CT_STAT_INC_ATOMIC(net, searched);
756 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
758 #define NF_CT_EVICTION_RANGE 8
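/* Note: NF_CT_EVICTION_RANGE caps how many candidate entries early_drop()
 * examines across consecutive buckets before it gives up and the new
 * conntrack allocation fails.
 */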
760 /* There's a small race here where we may free a just-assured
761 connection. Too bad: we're in trouble anyway. */
762 static noinline int early_drop(struct net *net, unsigned int _hash)
764 /* Use oldest entry, which is roughly LRU */
765 struct nf_conntrack_tuple_hash *h;
766 struct nf_conn *ct = NULL, *tmp;
767 struct hlist_nulls_node *n;
768 unsigned int i = 0, cnt = 0;
770 unsigned int hash, sequence;
775 sequence = read_seqcount_begin(&nf_conntrack_generation);
776 hash = hash_bucket(_hash, net);
777 for (; i < net->ct.htable_size; i++) {
778 lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
779 nf_conntrack_lock(lockp);
780 if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
784 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
786 tmp = nf_ct_tuplehash_to_ctrack(h);
787 if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
788 !nf_ct_is_dying(tmp) &&
789 atomic_inc_not_zero(&tmp->ct_general.use)) {
796 hash = (hash + 1) % net->ct.htable_size;
799 if (ct || cnt >= NF_CT_EVICTION_RANGE)
808 if (del_timer(&ct->timeout)) {
809 if (nf_ct_delete(ct, 0, 0)) {
811 NF_CT_STAT_INC_ATOMIC(net, early_drop);
818 static struct nf_conn *
819 __nf_conntrack_alloc(struct net *net,
820 const struct nf_conntrack_zone *zone,
821 const struct nf_conntrack_tuple *orig,
822 const struct nf_conntrack_tuple *repl,
827 /* We don't want any race condition at early drop stage */
828 atomic_inc(&net->ct.count);
830 if (nf_conntrack_max &&
831 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
832 if (!early_drop(net, hash)) {
833 atomic_dec(&net->ct.count);
834 net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
835 return ERR_PTR(-ENOMEM);
840 * Do not use kmem_cache_zalloc(), as this cache uses
841 * SLAB_DESTROY_BY_RCU.
843 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
847 spin_lock_init(&ct->lock);
848 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
849 ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
850 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
851 /* save hash for reusing when confirming */
852 *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
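/* (The reply-direction hnnode is not linked anywhere until the conntrack is
 * confirmed, so its pprev slot can temporarily carry the precomputed hash;
 * __nf_conntrack_confirm() reads it back instead of rehashing.)
 */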
854 /* Don't set timer yet: wait for confirmation */
855 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
856 write_pnet(&ct->ct_net, net);
857 memset(&ct->__nfct_init_offset[0], 0,
858 offsetof(struct nf_conn, proto) -
859 offsetof(struct nf_conn, __nfct_init_offset[0]));
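/* Note: the object comes from kmem_cache_alloc() rather than zalloc (see the
 * comment above), so only the members from __nfct_init_offset[0] up to, but
 * not including, 'proto' are cleared here; 'proto' is left for the l4
 * tracker's ->new() callback to initialize.
 */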
861 if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
864 /* Because we use RCU lookups, we set ct_general.use to zero before
865 * this is inserted in any list.
867 atomic_set(&ct->ct_general.use, 0);
870 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
872 atomic_dec(&net->ct.count);
873 return ERR_PTR(-ENOMEM);
876 struct nf_conn *nf_conntrack_alloc(struct net *net,
877 const struct nf_conntrack_zone *zone,
878 const struct nf_conntrack_tuple *orig,
879 const struct nf_conntrack_tuple *repl,
882 return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
884 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
886 void nf_conntrack_free(struct nf_conn *ct)
888 struct net *net = nf_ct_net(ct);
890 /* A freed object has refcnt == 0, that's
891 * the golden rule for SLAB_DESTROY_BY_RCU
893 NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
895 nf_ct_ext_destroy(ct);
897 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
898 smp_mb__before_atomic();
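/* (Presumably the barrier orders the frees above against the ct.count
 * decrement below, which nf_conntrack_cleanup_net_list() polls to decide
 * when a namespace has drained.)
 */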
899 atomic_dec(&net->ct.count);
901 EXPORT_SYMBOL_GPL(nf_conntrack_free);
904 /* Allocate a new conntrack: we return -ENOMEM if classification
905 failed due to stress. Otherwise it really is unclassifiable. */
906 static struct nf_conntrack_tuple_hash *
907 init_conntrack(struct net *net, struct nf_conn *tmpl,
908 const struct nf_conntrack_tuple *tuple,
909 struct nf_conntrack_l3proto *l3proto,
910 struct nf_conntrack_l4proto *l4proto,
912 unsigned int dataoff, u32 hash)
915 struct nf_conn_help *help;
916 struct nf_conntrack_tuple repl_tuple;
917 struct nf_conntrack_ecache *ecache;
918 struct nf_conntrack_expect *exp = NULL;
919 const struct nf_conntrack_zone *zone;
920 struct nf_conn_timeout *timeout_ext;
921 struct nf_conntrack_zone tmp;
922 unsigned int *timeouts;
924 if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
925 pr_debug("Can't invert tuple.\n");
929 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
930 ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
933 return (struct nf_conntrack_tuple_hash *)ct;
935 if (tmpl && nfct_synproxy(tmpl)) {
936 nfct_seqadj_ext_add(ct);
937 nfct_synproxy_ext_add(ct);
940 timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
942 timeouts = nf_ct_timeout_data(timeout_ext);
943 if (unlikely(!timeouts))
944 timeouts = l4proto->get_timeouts(net);
946 timeouts = l4proto->get_timeouts(net);
949 if (!l4proto->new(ct, skb, dataoff, timeouts)) {
950 nf_conntrack_free(ct);
951 pr_debug("can't track with proto module\n");
956 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
959 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
960 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
961 nf_ct_labels_ext_add(ct);
963 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
964 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
965 ecache ? ecache->expmask : 0,
969 if (net->ct.expect_count) {
970 spin_lock(&nf_conntrack_expect_lock);
971 exp = nf_ct_find_expectation(net, zone, tuple);
973 pr_debug("expectation arrives ct=%p exp=%p\n",
975 /* Welcome, Mr. Bond. We've been expecting you... */
976 __set_bit(IPS_EXPECTED_BIT, &ct->status);
977 /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
978 ct->master = exp->master;
980 help = nf_ct_helper_ext_add(ct, exp->helper,
983 rcu_assign_pointer(help->helper, exp->helper);
986 #ifdef CONFIG_NF_CONNTRACK_MARK
987 ct->mark = exp->master->mark;
989 #ifdef CONFIG_NF_CONNTRACK_SECMARK
990 ct->secmark = exp->master->secmark;
992 NF_CT_STAT_INC(net, expect_new);
994 spin_unlock(&nf_conntrack_expect_lock);
997 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
998 NF_CT_STAT_INC(net, new);
1001 /* Now it is inserted into the unconfirmed list, bump refcount */
1002 nf_conntrack_get(&ct->ct_general);
1003 nf_ct_add_to_unconfirmed_list(ct);
1009 exp->expectfn(ct, exp);
1010 nf_ct_expect_put(exp);
1013 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1016 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1017 static inline struct nf_conn *
1018 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1019 struct sk_buff *skb,
1020 unsigned int dataoff,
1023 struct nf_conntrack_l3proto *l3proto,
1024 struct nf_conntrack_l4proto *l4proto,
1026 enum ip_conntrack_info *ctinfo)
1028 const struct nf_conntrack_zone *zone;
1029 struct nf_conntrack_tuple tuple;
1030 struct nf_conntrack_tuple_hash *h;
1031 struct nf_conntrack_zone tmp;
1035 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1036 dataoff, l3num, protonum, net, &tuple, l3proto,
1038 pr_debug("Can't get tuple\n");
1042 /* look for tuple match */
1043 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1044 hash = hash_conntrack_raw(&tuple);
1045 h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1047 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1048 skb, dataoff, hash);
1054 ct = nf_ct_tuplehash_to_ctrack(h);
1056 /* It exists; we have (non-exclusive) reference. */
1057 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1058 *ctinfo = IP_CT_ESTABLISHED_REPLY;
1059 /* Please set reply bit if this packet OK */
1062 /* Once we've had two way comms, always ESTABLISHED. */
1063 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1064 pr_debug("normal packet for %p\n", ct);
1065 *ctinfo = IP_CT_ESTABLISHED;
1066 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1067 pr_debug("related packet for %p\n", ct);
1068 *ctinfo = IP_CT_RELATED;
1070 pr_debug("new packet for %p\n", ct);
1071 *ctinfo = IP_CT_NEW;
1075 skb->nfct = &ct->ct_general;
1076 skb->nfctinfo = *ctinfo;
1081 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1082 struct sk_buff *skb)
1084 struct nf_conn *ct, *tmpl = NULL;
1085 enum ip_conntrack_info ctinfo;
1086 struct nf_conntrack_l3proto *l3proto;
1087 struct nf_conntrack_l4proto *l4proto;
1088 unsigned int *timeouts;
1089 unsigned int dataoff;
1095 /* Previously seen (loopback or untracked)? Ignore. */
1096 tmpl = (struct nf_conn *)skb->nfct;
1097 if (!nf_ct_is_template(tmpl)) {
1098 NF_CT_STAT_INC_ATOMIC(net, ignore);
1104 /* rcu_read_lock()ed by nf_hook_slow */
1105 l3proto = __nf_ct_l3proto_find(pf);
1106 ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
1107 &dataoff, &protonum);
1109 pr_debug("not prepared to track yet or error occurred\n");
1110 NF_CT_STAT_INC_ATOMIC(net, error);
1111 NF_CT_STAT_INC_ATOMIC(net, invalid);
1116 l4proto = __nf_ct_l4proto_find(pf, protonum);
1118 /* It may be a special packet, error, unclean...
1119 * inverse of the return code tells the netfilter
1120 * core what to do with the packet. */
1121 if (l4proto->error != NULL) {
1122 ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
1125 NF_CT_STAT_INC_ATOMIC(net, error);
1126 NF_CT_STAT_INC_ATOMIC(net, invalid);
1130 /* ICMP[v6] protocol trackers may assign one conntrack. */
1135 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1136 l3proto, l4proto, &set_reply, &ctinfo);
1138 /* Not valid part of a connection */
1139 NF_CT_STAT_INC_ATOMIC(net, invalid);
1145 /* Too stressed to deal. */
1146 NF_CT_STAT_INC_ATOMIC(net, drop);
1151 NF_CT_ASSERT(skb->nfct);
1153 /* Decide what timeout policy we want to apply to this flow. */
1154 timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1156 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1158 /* Invalid: inverse of the return code tells
1159 * the netfilter core what to do */
1160 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1161 nf_conntrack_put(skb->nfct);
1163 NF_CT_STAT_INC_ATOMIC(net, invalid);
1164 if (ret == -NF_DROP)
1165 NF_CT_STAT_INC_ATOMIC(net, drop);
1170 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1171 nf_conntrack_event_cache(IPCT_REPLY, ct);
1174 /* Special case: we have to repeat this hook, assign the
1175 * template again to this packet. We assume that this packet
1176 * has no conntrack assigned. This is used by nf_ct_tcp. */
1177 if (ret == NF_REPEAT)
1178 skb->nfct = (struct nf_conntrack *)tmpl;
1185 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1187 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1188 const struct nf_conntrack_tuple *orig)
1193 ret = nf_ct_invert_tuple(inverse, orig,
1194 __nf_ct_l3proto_find(orig->src.l3num),
1195 __nf_ct_l4proto_find(orig->src.l3num,
1196 orig->dst.protonum));
1200 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1202 /* Alter reply tuple (maybe alter helper). This is for NAT, and is
1203 implicitly racy: see __nf_conntrack_confirm */
1204 void nf_conntrack_alter_reply(struct nf_conn *ct,
1205 const struct nf_conntrack_tuple *newreply)
1207 struct nf_conn_help *help = nfct_help(ct);
1209 /* Should be unconfirmed, so not in hash table yet */
1210 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1212 pr_debug("Altering reply tuple of %p to ", ct);
1213 nf_ct_dump_tuple(newreply);
1215 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1216 if (ct->master || (help && !hlist_empty(&help->expectations)))
1220 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1223 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1225 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1226 void __nf_ct_refresh_acct(struct nf_conn *ct,
1227 enum ip_conntrack_info ctinfo,
1228 const struct sk_buff *skb,
1229 unsigned long extra_jiffies,
1232 NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1235 /* Only update if this is not a fixed timeout */
1236 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1239 /* If not in hash table, timer will not be active yet */
1240 if (!nf_ct_is_confirmed(ct)) {
1241 ct->timeout.expires = extra_jiffies;
1243 unsigned long newtime = jiffies + extra_jiffies;
1245 /* Only update the timeout if the new timeout is at least
1246 HZ jiffies from the old timeout. Need del_timer for race
1247 avoidance (may already be dying). */
1248 if (newtime - ct->timeout.expires >= HZ)
1249 mod_timer_pending(&ct->timeout, newtime);
1254 struct nf_conn_acct *acct;
1256 acct = nf_conn_acct_find(ct);
1258 struct nf_conn_counter *counter = acct->counter;
1260 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
1261 atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
1265 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1267 bool __nf_ct_kill_acct(struct nf_conn *ct,
1268 enum ip_conntrack_info ctinfo,
1269 const struct sk_buff *skb,
1273 struct nf_conn_acct *acct;
1275 acct = nf_conn_acct_find(ct);
1277 struct nf_conn_counter *counter = acct->counter;
1279 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
1280 atomic64_add(skb->len - skb_network_offset(skb),
1281 &counter[CTINFO2DIR(ctinfo)].bytes);
1285 if (del_timer(&ct->timeout)) {
1286 ct->timeout.function((unsigned long)ct);
1291 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1293 #ifdef CONFIG_NF_CONNTRACK_ZONES
1294 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1295 .len = sizeof(struct nf_conntrack_zone),
1296 .align = __alignof__(struct nf_conntrack_zone),
1297 .id = NF_CT_EXT_ZONE,
1301 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1303 #include <linux/netfilter/nfnetlink.h>
1304 #include <linux/netfilter/nfnetlink_conntrack.h>
1305 #include <linux/mutex.h>
1307 /* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
1308 * in ip_conntrack_core, since we don't want the protocols to autoload
1309 * or depend on ctnetlink */
1310 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1311 const struct nf_conntrack_tuple *tuple)
1313 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1314 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1315 goto nla_put_failure;
1321 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1323 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1324 [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
1325 [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
1327 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1329 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1330 struct nf_conntrack_tuple *t)
1332 if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1335 t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1336 t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1340 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1342 int nf_ct_port_nlattr_tuple_size(void)
1344 return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1346 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1349 /* Used by ipt_REJECT and ip6t_REJECT. */
1350 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1353 enum ip_conntrack_info ctinfo;
1355 /* This ICMP is in reverse direction to the packet which caused it */
1356 ct = nf_ct_get(skb, &ctinfo);
1357 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1358 ctinfo = IP_CT_RELATED_REPLY;
1360 ctinfo = IP_CT_RELATED;
1362 /* Attach to new skbuff, and increment count */
1363 nskb->nfct = &ct->ct_general;
1364 nskb->nfctinfo = ctinfo;
1365 nf_conntrack_get(nskb->nfct);
1368 /* Bring out ya dead! */
1369 static struct nf_conn *
1370 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1371 void *data, unsigned int *bucket)
1373 struct nf_conntrack_tuple_hash *h;
1375 struct hlist_nulls_node *n;
1379 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1380 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1382 nf_conntrack_lock(lockp);
1383 if (*bucket < net->ct.htable_size) {
1384 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1385 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1387 ct = nf_ct_tuplehash_to_ctrack(h);
1388 if (net_eq(nf_ct_net(ct), net) &&
1398 for_each_possible_cpu(cpu) {
1399 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1401 spin_lock_bh(&pcpu->lock);
1402 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1403 ct = nf_ct_tuplehash_to_ctrack(h);
1405 set_bit(IPS_DYING_BIT, &ct->status);
1407 spin_unlock_bh(&pcpu->lock);
1412 atomic_inc(&ct->ct_general.use);
1418 void nf_ct_iterate_cleanup(struct net *net,
1419 int (*iter)(struct nf_conn *i, void *data),
1420 void *data, u32 portid, int report)
1423 unsigned int bucket = 0;
1427 if (atomic_read(&net->ct.count) == 0)
1430 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1431 /* Time to push up daisies... */
1432 if (del_timer(&ct->timeout))
1433 nf_ct_delete(ct, portid, report);
1435 /* ... else the timer will get him soon. */
1441 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1443 static int kill_all(struct nf_conn *i, void *data)
1448 void nf_ct_free_hashtable(void *hash, unsigned int size)
1450 if (is_vmalloc_addr(hash))
1453 free_pages((unsigned long)hash,
1454 get_order(sizeof(struct hlist_head) * size));
1456 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1458 static int untrack_refs(void)
1462 for_each_possible_cpu(cpu) {
1463 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1465 cnt += atomic_read(&ct->ct_general.use) - 1;
1470 void nf_conntrack_cleanup_start(void)
1472 RCU_INIT_POINTER(ip_ct_attach, NULL);
1475 void nf_conntrack_cleanup_end(void)
1477 RCU_INIT_POINTER(nf_ct_destroy, NULL);
1478 while (untrack_refs() > 0)
1481 #ifdef CONFIG_NF_CONNTRACK_ZONES
1482 nf_ct_extend_unregister(&nf_ct_zone_extend);
1484 nf_conntrack_proto_fini();
1485 nf_conntrack_seqadj_fini();
1486 nf_conntrack_labels_fini();
1487 nf_conntrack_helper_fini();
1488 nf_conntrack_timeout_fini();
1489 nf_conntrack_ecache_fini();
1490 nf_conntrack_tstamp_fini();
1491 nf_conntrack_acct_fini();
1492 nf_conntrack_expect_fini();
1496 * Mishearing the voices in his head, our hero wonders how he's
1497 * supposed to kill the mall.
1499 void nf_conntrack_cleanup_net(struct net *net)
1503 list_add(&net->exit_list, &single);
1504 nf_conntrack_cleanup_net_list(&single);
1507 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1513 * This makes sure all current packets have passed through
1514 * netfilter framework.  Roll on, two-stage module
1515 * delete...
1516 */
1520 list_for_each_entry(net, net_exit_list, exit_list) {
1521 nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1522 if (atomic_read(&net->ct.count) != 0)
1527 goto i_see_dead_people;
1530 list_for_each_entry(net, net_exit_list, exit_list) {
1531 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1532 nf_conntrack_proto_pernet_fini(net);
1533 nf_conntrack_helper_pernet_fini(net);
1534 nf_conntrack_ecache_pernet_fini(net);
1535 nf_conntrack_tstamp_pernet_fini(net);
1536 nf_conntrack_acct_pernet_fini(net);
1537 nf_conntrack_expect_pernet_fini(net);
1538 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1539 kfree(net->ct.slabname);
1540 free_percpu(net->ct.stat);
1541 free_percpu(net->ct.pcpu_lists);
1545 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1547 struct hlist_nulls_head *hash;
1548 unsigned int nr_slots, i;
1551 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1552 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1553 sz = nr_slots * sizeof(struct hlist_nulls_head);
1554 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1560 for (i = 0; i < nr_slots; i++)
1561 INIT_HLIST_NULLS_HEAD(&hash[i], i);
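/* Note: each chain's "nulls" end marker stores its bucket index, which is
 * what ____nf_conntrack_find() compares against to detect that an entry was
 * moved to another chain during a lockless lookup and restart the walk.
 */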
1565 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1567 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1570 unsigned int hashsize, old_size;
1571 struct hlist_nulls_head *hash, *old_hash;
1572 struct nf_conntrack_tuple_hash *h;
1575 if (current->nsproxy->net_ns != &init_net)
1578 /* On boot, we can set this without any fancy locking. */
1579 if (!nf_conntrack_htable_size)
1580 return param_set_uint(val, kp);
1582 rc = kstrtouint(val, 0, &hashsize);
1588 hash = nf_ct_alloc_hashtable(&hashsize, 1);
1593 nf_conntrack_all_lock();
1594 write_seqcount_begin(&nf_conntrack_generation);
1596 /* Lookups in the old hash might happen in parallel, which means we
1597 * might get false negatives during connection lookup. New connections
1598 * created because of a false negative won't make it into the hash
1599 * though since that requires taking the locks.
1602 for (i = 0; i < init_net.ct.htable_size; i++) {
1603 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1604 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1605 struct nf_conntrack_tuple_hash, hnnode);
1606 ct = nf_ct_tuplehash_to_ctrack(h);
1607 hlist_nulls_del_rcu(&h->hnnode);
1608 bucket = __hash_conntrack(&h->tuple, hashsize);
1609 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1612 old_size = init_net.ct.htable_size;
1613 old_hash = init_net.ct.hash;
1615 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1616 init_net.ct.hash = hash;
1618 write_seqcount_end(&nf_conntrack_generation);
1619 nf_conntrack_all_unlock();
1623 nf_ct_free_hashtable(old_hash, old_size);
1626 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1628 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1629 &nf_conntrack_htable_size, 0600);
1631 void nf_ct_untracked_status_or(unsigned long bits)
1635 for_each_possible_cpu(cpu)
1636 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1638 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1640 int nf_conntrack_init_start(void)
1645 seqcount_init(&nf_conntrack_generation);
1647 for (i = 0; i < CONNTRACK_LOCKS; i++)
1648 spin_lock_init(&nf_conntrack_locks[i]);
1650 if (!nf_conntrack_htable_size) {
1651 /* Idea from tcp.c: use 1/16384 of memory.
1652 * On i386: 32MB machine has 512 buckets.
1653 * >= 1GB machines have 16384 buckets.
1654 * >= 4GB machines have 65536 buckets.
1656 nf_conntrack_htable_size
1657 = (((totalram_pages << PAGE_SHIFT) / 16384)
1658 / sizeof(struct hlist_head));
1659 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1660 nf_conntrack_htable_size = 65536;
1661 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1662 nf_conntrack_htable_size = 16384;
1663 if (nf_conntrack_htable_size < 32)
1664 nf_conntrack_htable_size = 32;
1666 /* Use a max. factor of four by default to get the same max as
1667 * with the old struct list_heads. When a table size is given
1668 * we use the old value of 8 to avoid reducing the max.
1669 * entries. */
1672 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1674 printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1675 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1678 ret = nf_conntrack_expect_init();
1682 ret = nf_conntrack_acct_init();
1686 ret = nf_conntrack_tstamp_init();
1690 ret = nf_conntrack_ecache_init();
1694 ret = nf_conntrack_timeout_init();
1698 ret = nf_conntrack_helper_init();
1702 ret = nf_conntrack_labels_init();
1706 ret = nf_conntrack_seqadj_init();
1710 #ifdef CONFIG_NF_CONNTRACK_ZONES
1711 ret = nf_ct_extend_register(&nf_ct_zone_extend);
1715 ret = nf_conntrack_proto_init();
1719 /* Set up fake conntrack: to never be deleted, not in any hashes */
1720 for_each_possible_cpu(cpu) {
1721 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1722 write_pnet(&ct->ct_net, &init_net);
1723 atomic_set(&ct->ct_general.use, 1);
1725 /* - and make it look like a confirmed connection */
1726 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
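/* Note: these per-cpu untracked conntracks get attached to packets that
 * must bypass tracking (e.g. via the CT/NOTRACK targets); flagging them
 * confirmed and untracked keeps them out of the hash and the normal state
 * machine.
 */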
1730 #ifdef CONFIG_NF_CONNTRACK_ZONES
1731 nf_ct_extend_unregister(&nf_ct_zone_extend);
1734 nf_conntrack_seqadj_fini();
1736 nf_conntrack_labels_fini();
1738 nf_conntrack_helper_fini();
1740 nf_conntrack_timeout_fini();
1742 nf_conntrack_ecache_fini();
1744 nf_conntrack_tstamp_fini();
1746 nf_conntrack_acct_fini();
1748 nf_conntrack_expect_fini();
1753 void nf_conntrack_init_end(void)
1755 /* For use by REJECT target */
1756 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1757 RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1761 * We need to use special "null" values, not used in hash table
1763 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
1764 #define DYING_NULLS_VAL ((1<<30)+1)
1765 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
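/* Note: these constants are far above any realistic bucket count, so a
 * tuplehash parked on a per-cpu list can never be confused with one whose
 * nulls value is a real hash bucket index.
 */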
1767 int nf_conntrack_init_net(struct net *net)
1772 atomic_set(&net->ct.count, 0);
1774 net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1775 if (!net->ct.pcpu_lists)
1778 for_each_possible_cpu(cpu) {
1779 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1781 spin_lock_init(&pcpu->lock);
1782 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1783 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1786 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1788 goto err_pcpu_lists;
1790 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1791 if (!net->ct.slabname)
1794 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1795 sizeof(struct nf_conn), 0,
1796 SLAB_DESTROY_BY_RCU, NULL);
1797 if (!net->ct.nf_conntrack_cachep) {
1798 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1802 net->ct.htable_size = nf_conntrack_htable_size;
1803 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1804 if (!net->ct.hash) {
1805 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1808 ret = nf_conntrack_expect_pernet_init(net);
1811 ret = nf_conntrack_acct_pernet_init(net);
1814 ret = nf_conntrack_tstamp_pernet_init(net);
1817 ret = nf_conntrack_ecache_pernet_init(net);
1820 ret = nf_conntrack_helper_pernet_init(net);
1823 ret = nf_conntrack_proto_pernet_init(net);
1829 nf_conntrack_helper_pernet_fini(net);
1831 nf_conntrack_ecache_pernet_fini(net);
1833 nf_conntrack_tstamp_pernet_fini(net);
1835 nf_conntrack_acct_pernet_fini(net);
1837 nf_conntrack_expect_pernet_fini(net);
1839 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1841 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1843 kfree(net->ct.slabname);
1845 free_percpu(net->ct.stat);
1847 free_percpu(net->ct.pcpu_lists);