[nf_ct_frag6_consume_orig])
OVS_GREP_IFELSE([$KSRC/include/net/netfilter/ipv6/nf_defrag_ipv6.h],
[nf_ct_frag6_output])
+ OVS_GREP_IFELSE([$KSRC/include/net/netfilter/nf_nat.h], [nf_ct_nat_ext_add])
+ OVS_GREP_IFELSE([$KSRC/include/net/netfilter/nf_nat.h], [nf_nat_alloc_null_binding])
+ OVS_GREP_IFELSE([$KSRC/include/net/netfilter/nf_conntrack_seqadj.h], [nf_ct_seq_adjust])
OVS_GREP_IFELSE([$KSRC/include/linux/random.h], [prandom_u32])
OVS_GREP_IFELSE([$KSRC/include/linux/random.h], [prandom_u32_max])
linux/compat/include/net/netfilter/nf_conntrack_core.h \
linux/compat/include/net/netfilter/nf_conntrack_expect.h \
linux/compat/include/net/netfilter/nf_conntrack_labels.h \
+ linux/compat/include/net/netfilter/nf_conntrack_seqadj.h \
linux/compat/include/net/netfilter/nf_conntrack_zones.h \
+ linux/compat/include/net/netfilter/nf_nat.h \
linux/compat/include/net/netfilter/ipv6/nf_defrag_ipv6.h \
linux/compat/include/net/sctp/checksum.h
EXTRA_DIST += linux/compat/build-aux/export-check-whitelist
kfree(tmpl);
}
#define nf_ct_tmpl_free rpl_nf_ct_tmpl_free
-#endif /* HAVE_NF_CT_TMPL_ALLOC */
+
+/* Compat shim: adapts the newer nf_conntrack_find_get() signature,
+ * which takes a struct nf_conntrack_zone pointer, onto an older kernel
+ * API that takes a plain zone id (only zone->id is forwarded; any other
+ * zone fields are ignored on such kernels).  The opening #ifndef guard
+ * sits above this hunk — presumably this is compiled only when
+ * HAVE_NF_CT_TMPL_ALLOC_TAKES_STRUCT_ZONE is unset; confirm against the
+ * full header. */
+static inline struct nf_conntrack_tuple_hash *
+rpl_nf_conntrack_find_get(struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return nf_conntrack_find_get(net, zone->id, tuple);
+}
+#define nf_conntrack_find_get rpl_nf_conntrack_find_get
+#endif /* HAVE_NF_CT_TMPL_ALLOC_TAKES_STRUCT_ZONE */
+
+#ifndef HAVE_NF_CT_GET_TUPLEPR_TAKES_STRUCT_NET
+/* Compat shim: presents the newer nf_ct_get_tuple() signature (which
+ * includes a struct net argument) on kernels whose nf_ct_get_tuple()
+ * predates it.  The @net argument is accepted for API compatibility but
+ * deliberately dropped when calling through to the old kernel function.
+ * Returns whatever the underlying nf_ct_get_tuple() returns (true on
+ * successful tuple extraction). */
+static inline bool rpl_nf_ct_get_tuple(const struct sk_buff *skb,
+ unsigned int nhoff,
+ unsigned int dataoff, u_int16_t l3num,
+ u_int8_t protonum,
+ struct net *net,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_l4proto *l4proto)
+{
+ return nf_ct_get_tuple(skb, nhoff, dataoff, l3num, protonum, tuple,
+ l3proto, l4proto);
+}
+#define nf_ct_get_tuple rpl_nf_ct_get_tuple
+#endif /* HAVE_NF_CT_GET_TUPLEPR_TAKES_STRUCT_NET */
+
#endif /* _NF_CONNTRACK_CORE_WRAPPER_H */
--- /dev/null
+#ifndef _NF_CONNTRACK_SEQADJ_WRAPPER_H
+#define _NF_CONNTRACK_SEQADJ_WRAPPER_H
+
+#ifdef HAVE_NF_CT_SEQ_ADJUST
+#include_next <net/netfilter/nf_conntrack_seqadj.h>
+#else
+
+#include <net/netfilter/nf_nat_helper.h>
+
+/* TCP sequence number adjustment for kernels that predate
+ * net/netfilter/nf_conntrack_seqadj.h: dispatch through the legacy
+ * nf_nat_seq_adjust_hook.  Returns 1 on success, 0 on failure; on
+ * failure the conntrack "drop" statistic for ct's netns is bumped.
+ *
+ * NOTE(review): @protoff is ignored here — the legacy hook is called
+ * with ip_hdrlen(skb) instead, which looks IPv4-only.  Confirm callers
+ * never reach this compat path for IPv6 traffic.
+ * NOTE(review): rcu_dereference() of the hook presumably relies on the
+ * caller holding rcu_read_lock() — verify at the call sites. */
+static inline int
+nf_ct_seq_adjust(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff)
+{
+ typeof(nf_nat_seq_adjust_hook) seq_adjust;
+
+ seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
+ if (!seq_adjust ||
+ !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif /* HAVE_NF_CT_SEQ_ADJUST */
+
+#endif /* _NF_CONNTRACK_SEQADJ_WRAPPER_H */
--- /dev/null
+#ifndef _NF_NAT_WRAPPER_H
+#define _NF_NAT_WRAPPER_H
+
+#include_next <net/netfilter/nf_nat.h>
+
+#ifndef HAVE_NF_CT_NAT_EXT_ADD
+
+/* Backport of nf_ct_nat_ext_add(): return @ct's NAT extension,
+ * attaching one (with GFP_ATOMIC) if it is not yet present.  The
+ * extension is only added while the connection is still unconfirmed;
+ * for a confirmed conntrack with no existing NAT extension — or if
+ * nf_ct_ext_add() fails — this returns NULL, so callers must check. */
+static inline struct nf_conn_nat *
+nf_ct_nat_ext_add(struct nf_conn *ct)
+{
+ struct nf_conn_nat *nat = nfct_nat(ct);
+ if (nat)
+ return nat;
+
+ if (!nf_ct_is_confirmed(ct))
+ nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+
+ return nat;
+}
+#endif /* HAVE_NF_CT_NAT_EXT_ADD */
+
+#ifndef HAVE_NF_NAT_ALLOC_NULL_BINDING
+/* Backport of nf_nat_alloc_null_binding(): install a NAT binding that
+ * maps the connection onto its own (reply-direction) address, i.e. a
+ * MAP_IPS-only range with min_addr == max_addr, leaving the per-proto
+ * part for the protocol to pick.  Returns the NF_* verdict from
+ * nf_nat_setup_info(). */
+static inline unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+ /* Force range to this IP; let proto decide mapping for
+ * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+ * Use reply in case it's already been mangled (eg local packet).
+ */
+ union nf_inet_addr ip =
+ (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
+ struct nf_nat_range range = {
+ .flags = NF_NAT_RANGE_MAP_IPS,
+ .min_addr = ip,
+ .max_addr = ip,
+ };
+ return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+
+#endif /* HAVE_NF_NAT_ALLOC_NULL_BINDING */
+
+#endif /* _NF_NAT_WRAPPER_H */