#ifndef __NET_IP_TUNNELS_WRAPPER_H
#define __NET_IP_TUNNELS_WRAPPER_H 1

#include <linux/version.h>

#ifdef HAVE_METADATA_DST
/* Block all ip_tunnel functions.
 * Only functions that do not depend on the ip_tunnel structure can
 * be used. Those need to be explicitly defined in this header file. */
#include_next <net/ip_tunnels.h>
#endif

#include <linux/if_tunnel.h>
#include <linux/types.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/rtnetlink.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
#define __iptunnel_pull_header rpl___iptunnel_pull_header
int rpl___iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			       __be16 inner_proto, bool raw_proto, bool xnet);

#define iptunnel_pull_header rpl_iptunnel_pull_header
static inline int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
					   __be16 inner_proto, bool xnet)
{
	return rpl___iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
#endif
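
/* Illustrative sketch, not part of the original header: a receive path
 * would typically parse the outer tunnel header, compute its length and
 * then strip it with iptunnel_pull_header().  The helper name and the
 * fixed 4-byte header length below are hypothetical; real callers derive
 * hdr_len from the parsed tunnel header.
 */
static inline int example_pull_outer_header(struct sk_buff *skb, bool xnet)
{
	/* ETH_P_TEB: the inner packet starts with an Ethernet header. */
	return iptunnel_pull_header(skb, 4, htons(ETH_P_TEB), xnet);
}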

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
					     bool csum_help, int gso_type_mask,
					     void (*fix_segment)(struct sk_buff *));

#define iptunnel_xmit rpl_iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		      __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
		      __be16 df, bool xnet);

#else

#define ovs_iptunnel_handle_offloads(skb, csum_help, gso_type_mask, fix_segment) \
	iptunnel_handle_offloads(skb, csum_help, gso_type_mask)

#define rpl_iptunnel_xmit iptunnel_xmit
int rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		      __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
		      __be16 df, bool xnet);
#endif /* < 3.18 */

/* This is not required for OVS on kernels older than 3.18, but the gre.h
 * header file needs this declaration for function gre_handle_offloads().
 * So it is defined for all kernel versions.
 */
#define rpl_iptunnel_handle_offloads iptunnel_handle_offloads
struct sk_buff *rpl_iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
					     int gso_type_mask);

#define TUNNEL_CSUM	__cpu_to_be16(0x01)
#define TUNNEL_ROUTING	__cpu_to_be16(0x02)
#define TUNNEL_KEY	__cpu_to_be16(0x04)
#define TUNNEL_SEQ	__cpu_to_be16(0x08)
#define TUNNEL_STRICT	__cpu_to_be16(0x10)
#define TUNNEL_REC	__cpu_to_be16(0x20)
#define TUNNEL_VERSION	__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY	__cpu_to_be16(0x80)

#define PACKET_RCVD	0
#define PACKET_REJECT	1

#ifndef TUNNEL_DONT_FRAGMENT
#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
#endif

#ifndef TUNNEL_OAM
#define TUNNEL_OAM		__cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT		__cpu_to_be16(0x0400)
#endif

#ifndef TUNNEL_GENEVE_OPT
#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
#endif

#ifndef TUNNEL_VXLAN_OPT
#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)
#endif

/* Older kernels defined TUNNEL_OPTIONS_PRESENT to GENEVE only */
#undef TUNNEL_OPTIONS_PRESENT
#define TUNNEL_OPTIONS_PRESENT	(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
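
/* Illustrative sketch, not part of the original header: checking whether
 * any tunnel metadata options (Geneve or VXLAN) accompany a packet.  The
 * helper name is hypothetical.
 */
static inline bool example_has_tun_opts(__be16 tun_flags)
{
	return !!(tun_flags & TUNNEL_OPTIONS_PRESENT);
}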

#define skb_is_encapsulated ovs_skb_is_encapsulated
bool ovs_skb_is_encapsulated(struct sk_buff *skb);

#ifndef HAVE_METADATA_DST
/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
	(FIELD_SIZEOF(struct ip_tunnel_key, u) - \
	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
	__be64			tun_id;
	union {
		struct {
			__be32	src;
			__be32	dst;
		} ipv4;
		struct {
			struct in6_addr src;
			struct in6_addr dst;
		} ipv6;
	} u;
	__be16			tun_flags;
	u8			tos;		/* TOS for IPv4, TC for IPv6 */
	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
	__be16			tp_src;
	__be16			tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	u8			options_len;
	u8			mode;
};

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
	return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len)
{
	memcpy(ip_tunnel_info_opts(info), from, len);
	info->options_len = len;
}
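
/* Illustrative sketch, not part of the original header: attaching
 * Geneve-style option bytes to a tunnel info.  This assumes 'info' was
 * allocated with room for 'len' bytes of options right behind it; the
 * helper name is hypothetical.
 */
static inline void example_set_tun_opts(struct ip_tunnel_info *info,
					const void *opts, int len)
{
	ip_tunnel_info_opts_set(info, opts, len);
	info->key.tun_flags |= TUNNEL_GENEVE_OPT;
}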

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->tun_flags = tun_flags;

	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
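
/* Illustrative sketch, not part of the original header: filling a key
 * for an IPv4, VXLAN-like tunnel.  All addresses, ports and the tunnel
 * ID are hypothetical; a real caller takes them from flow metadata or
 * netlink configuration.
 */
static inline void example_key_init(struct ip_tunnel_key *key)
{
	ip_tunnel_key_init(key,
			   htonl(0xc0a80101),	/* saddr: 192.168.1.1 */
			   htonl(0xc0a80102),	/* daddr: 192.168.1.2 */
			   0, 64,		/* tos, ttl */
			   0, htons(4789),	/* tp_src, tp_dst (VXLAN) */
			   cpu_to_be64(42),	/* tun_id */
			   TUNNEL_KEY);
}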

#define ip_tunnel_collect_metadata() true

#define ip_tunnel rpl_ip_tunnel

struct ip_tunnel {
	struct net_device	*dev;
	struct net		*net;	/* netns for packet i/o */

	int		err_count;	/* Number of arrived ICMP errors */
	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */

	/* These four fields used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	u32		o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */
	int		mlink;

	struct ip_tunnel_parm parms;

	int		encap_hlen;	/* Encap header length (FOU, GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */

	int		ip_tnl_net_id;
	bool		collect_md;
};

#define ip_tunnel_net rpl_ip_tunnel_net
struct ip_tunnel_net {
	struct ip_tunnel __rcu *collect_md_tun;
	struct rtnl_link_ops *rtnl_ops;
};

#ifndef HAVE_PCPU_SW_NETSTATS
#define ip_tunnel_get_stats64 rpl_ip_tunnel_get_stats64
#else
#define rpl_ip_tunnel_get_stats64 ip_tunnel_get_stats64
#endif
struct rtnl_link_stats64 *rpl_ip_tunnel_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot);

#define ip_tunnel_get_dsfield rpl_ip_tunnel_get_dsfield
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

#define ip_tunnel_ecn_encap rpl_ip_tunnel_ecn_encap
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
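
/* Illustrative sketch, not part of the original header: deriving the
 * outer TOS byte for an encapsulated packet from a configured TOS of 0,
 * i.e. inheriting only the inner ECN bits.  The helper name is
 * hypothetical, and the inner header is assumed to be IPv4.
 */
static inline u8 example_outer_tos(const struct sk_buff *skb)
{
	const struct iphdr *inner_iph = ip_hdr(skb);

	return ip_tunnel_ecn_encap(0, inner_iph, skb);
}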

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
#define iptunnel_xmit_stats(err, stats, dummy)		\
do {							\
	if (err > 0) {					\
		(stats)->tx_bytes += err;		\
		(stats)->tx_packets++;			\
	} else if (err < 0) {				\
		(stats)->tx_errors++;			\
		(stats)->tx_aborted_errors++;		\
	} else {					\
		(stats)->tx_dropped++;			\
	}						\
} while (0)
#else
#define iptunnel_xmit_stats rpl_iptunnel_xmit_stats
static inline void iptunnel_xmit_stats(int err,
				       struct net_device_stats *err_stats,
				       struct pcpu_sw_netstats __percpu *stats)
{
	if (err > 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += err;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else if (err < 0) {
		err_stats->tx_errors++;
		err_stats->tx_aborted_errors++;
	} else {
		err_stats->tx_dropped++;
	}
}
#endif
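
/* Illustrative sketch, not part of the original header: a transmit path
 * typically feeds the return value of iptunnel_xmit() straight into
 * iptunnel_xmit_stats().  The helper name is hypothetical; on kernels
 * older than 2.6.39 the macro variant simply ignores 'tstats'.
 */
static inline void example_count_xmit(int err,
				      struct net_device_stats *err_stats,
				      struct pcpu_sw_netstats __percpu *tstats)
{
	iptunnel_xmit_stats(err, err_stats, tstats);
}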

#define ip_tunnel_init rpl_ip_tunnel_init
int rpl_ip_tunnel_init(struct net_device *dev);

#define ip_tunnel_uninit rpl_ip_tunnel_uninit
void rpl_ip_tunnel_uninit(struct net_device *dev);

#define ip_tunnel_change_mtu rpl_ip_tunnel_change_mtu
int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

#define ip_tunnel_newlink rpl_ip_tunnel_newlink
int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
			  struct ip_tunnel_parm *p);

#define ip_tunnel_dellink rpl_ip_tunnel_dellink
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
#else
void rpl_ip_tunnel_dellink(struct net_device *dev);
#endif

#define ip_tunnel_init_net rpl_ip_tunnel_init_net
int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
			   struct rtnl_link_ops *ops, char *devname);

#define ip_tunnel_delete_net rpl_ip_tunnel_delete_net
void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

#define ip_tunnel_setup rpl_ip_tunnel_setup
void rpl_ip_tunnel_setup(struct net_device *dev, int net_id);

#define ip_tunnel_get_iflink rpl_ip_tunnel_get_iflink
int rpl_ip_tunnel_get_iflink(const struct net_device *dev);

#define ip_tunnel_get_link_net rpl_ip_tunnel_get_link_net
struct net *rpl_ip_tunnel_get_link_net(const struct net_device *dev);

#endif /* !HAVE_METADATA_DST */

#ifndef HAVE___IP_TUNNEL_CHANGE_MTU
#define __ip_tunnel_change_mtu rpl___ip_tunnel_change_mtu
int rpl___ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
#endif

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

#endif /* __NET_IP_TUNNELS_WRAPPER_H */