1 #ifndef __NET_IP_WRAPPER_H
2 #define __NET_IP_WRAPPER_H 1
4 #include_next <net/ip.h>
7 #include <linux/version.h>
#ifndef HAVE_INET_GET_LOCAL_PORT_RANGE_USING_NET
/* Older kernels' inet_get_local_port_range() takes no struct net argument;
 * accept and ignore 'net' so callers can use the modern signature. */
static inline void rpl_inet_get_local_port_range(struct net *net, int *low,
						 int *high)
{
	inet_get_local_port_range(low, high);
}
#define inet_get_local_port_range rpl_inet_get_local_port_range
#endif /* HAVE_INET_GET_LOCAL_PORT_RANGE_USING_NET */
#ifndef IPSKB_FRAG_PMTU
#define IPSKB_FRAG_PMTU		BIT(6)
#endif

/* IPv4 datagram length is stored into 16bit field (tot_len) */
#ifndef IP_MAX_MTU
#define IP_MAX_MTU	0xFFFFU
#endif
28 #ifndef HAVE_IP_SKB_DST_MTU
29 static inline bool rpl_ip_sk_use_pmtu(const struct sock *sk)
31 return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
33 #define ip_sk_use_pmtu rpl_ip_sk_use_pmtu
35 static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
38 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
39 struct net *net = dev_net(dst->dev);
41 if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
42 dst_metric_locked(dst, RTAX_MTU) ||
47 return min(dst->dev->mtu, IP_MAX_MTU);
50 static inline unsigned int rpl_ip_skb_dst_mtu(const struct sk_buff *skb)
52 if (!skb->sk || ip_sk_use_pmtu(skb->sk)) {
53 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
54 return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
56 return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
59 #define ip_skb_dst_mtu rpl_ip_skb_dst_mtu
60 #endif /* HAVE_IP_SKB_DST_MTU */
/* The output callback passed to ip_fragment()/ip_do_fragment() takes a
 * struct sock argument on some kernel versions; hide that signature
 * difference behind a single parameter-list macro. */
#ifdef HAVE_IP_FRAGMENT_TAKES_SOCK
#define OVS_VPORT_OUTPUT_PARAMS struct sock *sock, struct sk_buff *skb
#else
#define OVS_VPORT_OUTPUT_PARAMS struct sk_buff *skb
#endif
/* Prior to upstream commit d6b915e29f4a ("ip_fragment: don't forward
 * defragmented DF packet"), IPCB(skb)->frag_max_size was not always populated
 * correctly, which would lead to reassembled packets not being refragmented.
 * So, we backport all of ip_defrag() in these cases.
 */
73 #ifndef HAVE_CORRECT_MRU_HANDLING
75 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
76 static inline bool ip_defrag_user_in_between(u32 user,
77 enum ip_defrag_users lower_bond,
78 enum ip_defrag_users upper_bond)
80 return user >= lower_bond && user <= upper_bond;
/* Full backports, defined in datapath/linux/compat/ip_fragment.c, replacing
 * the upstream fragmentation/reassembly entry points on kernels with the
 * frag_max_size bug described above. */
int rpl_ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       int (*output)(OVS_VPORT_OUTPUT_PARAMS));
#define ip_do_fragment rpl_ip_do_fragment

/* If backporting IP defrag, then init/exit functions need to be called from
 * compat_{in,ex}it() to prepare the backported fragmentation cache. In this
 * case we declare the functions which are defined in
 * datapath/linux/compat/ip_fragment.c. */
int rpl_ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#define ip_defrag rpl_ip_defrag
int __init rpl_ipfrag_init(void);
void rpl_ipfrag_fini(void);
97 #else /* HAVE_CORRECT_MRU_HANDLING */
#ifndef HAVE_IP_DO_FRAGMENT_TAKES_NET
/* Older ip_do_fragment() lacks the struct net argument; accept and drop it
 * so callers can use the modern signature. */
static inline int rpl_ip_do_fragment(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     int (*output)(OVS_VPORT_OUTPUT_PARAMS))
{
	return ip_do_fragment(sk, skb, output);
}
#define ip_do_fragment rpl_ip_do_fragment
#endif /* HAVE_IP_DO_FRAGMENT_TAKES_NET */
109 /* We have no good way to detect the presence of upstream commit 8282f27449bf
110 * ("inet: frag: Always orphan skbs inside ip_defrag()"), but it should be
111 * always included in kernels 4.5+. */
112 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
113 static inline int rpl_ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
116 #ifndef HAVE_IP_DEFRAG_TAKES_NET
117 return ip_defrag(skb, user);
119 return ip_defrag(net, skb, user);
122 #define ip_defrag rpl_ip_defrag
/* When the upstream defrag implementation is usable, it manages its own
 * init/exit, so the compat hooks invoked from compat_{in,ex}it() have
 * nothing to do. */
static inline int rpl_ipfrag_init(void)
{
	return 0;
}

static inline void rpl_ipfrag_fini(void)
{
}
130 #endif /* HAVE_CORRECT_MRU_HANDLING */
/* Map the generic init/exit names used by the datapath onto whichever
 * implementation (backported cache setup or no-op) was selected above. */
#define ipfrag_init rpl_ipfrag_init
#define ipfrag_fini rpl_ipfrag_fini