#ifndef __NET_IP_TUNNELS_WRAPPER_H
#define __NET_IP_TUNNELS_WRAPPER_H 1

#include <linux/version.h>

#ifdef USE_UPSTREAM_TUNNEL
/* Block all ip_tunnel functions.
 * Only functions that do not depend on the ip_tunnel structure can
 * be used. Those need to be explicitly defined in this header file. */
#include_next <net/ip_tunnels.h>
#endif

#include <linux/if_tunnel.h>
#include <linux/types.h>
#include <net/dsfield.h>
#include <net/dst_cache.h>
#include <net/inet_ecn.h>
#include <net/rtnetlink.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
#define __iptunnel_pull_header rpl___iptunnel_pull_header
int rpl___iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			       __be16 inner_proto, bool raw_proto, bool xnet);

#define iptunnel_pull_header rpl_iptunnel_pull_header
static inline int rpl_iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
					   __be16 inner_proto, bool xnet)
{
	return rpl___iptunnel_pull_header(skb, hdr_len, inner_proto, false, xnet);
}
#endif
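
/*
 * Illustrative sketch (not part of the original header): a tunnel
 * receive handler strips the outer header before handing the inner
 * packet up the stack. The helper below is hypothetical; the 4-byte
 * length assumes a base GRE header with no optional fields.
 */
static inline int example_pull_base_gre(struct sk_buff *skb, bool xnet)
{
	/* Inner payload is assumed to be IPv4 (ETH_P_IP). */
	return iptunnel_pull_header(skb, 4, htons(ETH_P_IP), xnet);
}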

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
				 bool csum_help, int gso_type_mask,
				 void (*fix_segment)(struct sk_buff *));

/* This is required to compile upstream gre.h. gre_handle_offloads()
 * is defined in gre.h and needs iptunnel_handle_offloads(). This provides
 * a default signature for this function.
 * The rpl prefix is to make the OVS build happy.
 */
#define iptunnel_handle_offloads rpl_iptunnel_handle_offloads
struct sk_buff *rpl_iptunnel_handle_offloads(struct sk_buff *skb,
					     bool csum_help,
					     int gso_type_mask);
#else
#define ovs_iptunnel_handle_offloads(skb, csum_help, gso_type_mask, fix_segment) \
	iptunnel_handle_offloads(skb, gso_type_mask)
#endif
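
/*
 * Illustrative sketch (not part of the original header): a transmit
 * path reconciles GSO and checksum state before pushing the outer
 * header. On >= 4.7 kernels the macro above discards csum_help and
 * fix_segment and expands to the upstream iptunnel_handle_offloads().
 * The helper name and the chosen GSO type are hypothetical.
 */
static inline int example_prepare_offloads(struct sk_buff *skb)
{
	return ovs_iptunnel_handle_offloads(skb, false, SKB_GSO_UDP_TUNNEL,
					    NULL);
}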

#define iptunnel_xmit rpl_iptunnel_xmit
void rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		       __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
		       __be16 df, bool xnet);

#ifndef TUNNEL_CSUM
#define TUNNEL_CSUM	__cpu_to_be16(0x01)
#define TUNNEL_ROUTING	__cpu_to_be16(0x02)
#define TUNNEL_KEY	__cpu_to_be16(0x04)
#define TUNNEL_SEQ	__cpu_to_be16(0x08)
#define TUNNEL_STRICT	__cpu_to_be16(0x10)
#define TUNNEL_REC	__cpu_to_be16(0x20)
#define TUNNEL_VERSION	__cpu_to_be16(0x40)
#define TUNNEL_NO_KEY	__cpu_to_be16(0x80)
#endif

#ifndef PACKET_REJECT
#define PACKET_REJECT	1
#endif

#ifndef TUNNEL_DONT_FRAGMENT
#define TUNNEL_DONT_FRAGMENT	__cpu_to_be16(0x0100)
#endif

#ifndef TUNNEL_OAM
#define TUNNEL_OAM	__cpu_to_be16(0x0200)
#define TUNNEL_CRIT_OPT	__cpu_to_be16(0x0400)
#endif

#ifndef TUNNEL_GENEVE_OPT
#define TUNNEL_GENEVE_OPT	__cpu_to_be16(0x0800)
#endif

#ifndef TUNNEL_VXLAN_OPT
#define TUNNEL_VXLAN_OPT	__cpu_to_be16(0x1000)
#endif

/* Older kernels defined TUNNEL_OPTIONS_PRESENT to cover GENEVE only. */
#undef TUNNEL_OPTIONS_PRESENT
#define TUNNEL_OPTIONS_PRESENT	(TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT)
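
/*
 * Illustrative sketch (not part of the original header): the TUNNEL_*
 * bits are tested against the tun_flags word carried in tunnel
 * metadata, e.g. to check whether TLV options accompany a packet.
 */
static inline bool example_flags_have_options(__be16 tun_flags)
{
	return (tun_flags & TUNNEL_OPTIONS_PRESENT) != 0;
}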

#define skb_is_encapsulated ovs_skb_is_encapsulated
bool ovs_skb_is_encapsulated(struct sk_buff *skb);

#ifndef USE_UPSTREAM_TUNNEL
/* Used to memset ip_tunnel padding. */
#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)

/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
	(FIELD_SIZEOF(struct ip_tunnel_key, u) -		\
	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))

struct ip_tunnel_key {
	__be64	tun_id;
	union {
		struct { __be32 src; __be32 dst; } ipv4;
		struct { struct in6_addr src; struct in6_addr dst; } ipv6;
	} u;
	__be16	tun_flags;
	u8	tos;		/* TOS for IPv4, TC for IPv6 */
	u8	ttl;		/* TTL for IPv4, HL for IPv6 */
	__be32	label;		/* Flow Label for IPv6 */
	__be16	tp_src;
	__be16	tp_dst;
};

/* Flags for ip_tunnel_info mode. */
#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */

struct ip_tunnel_info {
	struct ip_tunnel_key	key;
	struct dst_cache	dst_cache;
	u8			options_len;
	u8			mode;
};

static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info)
{
	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}

static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
{
	/* Options, when present, are stored right after the info struct. */
	return info + 1;
}

static inline void ip_tunnel_info_opts_get(void *to,
					   const struct ip_tunnel_info *info)
{
	memcpy(to, info + 1, info->options_len);
}

static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
					   const void *from, int len)
{
	memcpy(ip_tunnel_info_opts(info), from, len);
	info->options_len = len;
}
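
/*
 * Illustrative sketch (not part of the original header): option TLVs
 * live immediately after struct ip_tunnel_info, so attaching them is a
 * copy plus flagging their presence. Assumes the caller allocated
 * opts_len bytes of trailing space; the helper name is hypothetical.
 */
static inline void example_attach_geneve_opts(struct ip_tunnel_info *info,
					      const void *opts, int opts_len)
{
	ip_tunnel_info_opts_set(info, opts, opts_len);
	info->key.tun_flags |= TUNNEL_GENEVE_OPT;
}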

static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
				      __be32 saddr, __be32 daddr,
				      u8 tos, u8 ttl, __be32 label,
				      __be16 tp_src, __be16 tp_dst,
				      __be64 tun_id, __be16 tun_flags)
{
	key->tun_id = tun_id;
	key->u.ipv4.src = saddr;
	key->u.ipv4.dst = daddr;
	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
	key->tos = tos;
	key->ttl = ttl;
	key->label = label;
	key->tun_flags = tun_flags;

	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
	 * the upper tunnel are used.
	 * E.g. for GRE over IPsec, tp_src and tp_dst are zero.
	 */
	key->tp_src = tp_src;
	key->tp_dst = tp_dst;

	/* Clear struct padding. */
	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
}
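
/*
 * Illustrative sketch (not part of the original header): initializing a
 * key for a keyed IPv4 tunnel. Every value below is a hypothetical
 * placeholder (10.0.0.1 -> 10.0.0.2, VXLAN-style port 4789, ID 42).
 */
static inline void example_init_key(struct ip_tunnel_key *key)
{
	ip_tunnel_key_init(key,
			   htonl(0x0a000001), htonl(0x0a000002),
			   0, 64, 0,		/* tos, ttl, label */
			   0, htons(4789),	/* tp_src, tp_dst */
			   cpu_to_be64(42), TUNNEL_KEY);
}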

#define ip_tunnel_collect_metadata() true

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
#define TUNNEL_NOCACHE 0

static inline bool
ip_tunnel_dst_cache_usable(const struct sk_buff *skb,
			   const struct ip_tunnel_info *info)
{
	if (skb->mark)
		return false;
	if (!info)
		return true;
	if (info->key.tun_flags & TUNNEL_NOCACHE)
		return false;

	return true;
}
#endif

#define ip_tunnel rpl_ip_tunnel

struct ip_tunnel {
	struct net_device	*dev;
	struct net		*net;	/* netns for packet i/o */

	int		err_count;	/* Number of arrived ICMP errors */
	unsigned long	err_time;	/* Time when the last ICMP error
					 * arrived */

	/* These fields are used only by GRE */
	u32		i_seqno;	/* The last seen seqno */
	u32		o_seqno;	/* The last output seqno */
	int		tun_hlen;	/* Precalculated header length */

	struct ip_tunnel_parm parms;

	int		encap_hlen;	/* Encap header length (FOU, GUE) */
	int		hlen;		/* tun_hlen + encap_hlen */
};

#define ip_tunnel_net rpl_ip_tunnel_net
struct ip_tunnel_net {
	struct ip_tunnel __rcu *collect_md_tun;
	struct rtnl_link_ops *rtnl_ops;
};

#ifndef HAVE_PCPU_SW_NETSTATS
#define ip_tunnel_get_stats64 rpl_ip_tunnel_get_stats64
#else
#define rpl_ip_tunnel_get_stats64 ip_tunnel_get_stats64
#endif
struct rtnl_link_stats64 *rpl_ip_tunnel_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot);

#define ip_tunnel_get_dsfield rpl_ip_tunnel_get_dsfield
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
				       const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

#define ip_tunnel_ecn_encap rpl_ip_tunnel_ecn_encap
static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
				     const struct sk_buff *skb)
{
	u8 inner = ip_tunnel_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}
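
/*
 * Illustrative sketch (not part of the original header): on the encap
 * transmit path the configured TOS is combined with the inner packet's
 * ECN bits before the outer IPv4 header is built. Assumes the inner
 * network header offset is already valid at this point.
 */
static inline u8 example_outer_tos(u8 cfg_tos, struct sk_buff *skb)
{
	const struct iphdr *inner_iph;

	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
	return ip_tunnel_ecn_encap(cfg_tos, inner_iph, skb);
}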

static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
	if (pkt_len > 0) {
		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += pkt_len;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
		put_cpu_ptr(tstats);
	} else {
		struct net_device_stats *err_stats = &dev->stats;

		if (pkt_len < 0) {
			err_stats->tx_errors++;
			err_stats->tx_aborted_errors++;
		} else {
			err_stats->tx_dropped++;
		}
	}
}

#define ip_tunnel_init rpl_ip_tunnel_init
int rpl_ip_tunnel_init(struct net_device *dev);

#define ip_tunnel_uninit rpl_ip_tunnel_uninit
void rpl_ip_tunnel_uninit(struct net_device *dev);

#define ip_tunnel_change_mtu rpl_ip_tunnel_change_mtu
int rpl_ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

#define ip_tunnel_newlink rpl_ip_tunnel_newlink
int rpl_ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
			  struct ip_tunnel_parm *p);

#define ip_tunnel_dellink rpl_ip_tunnel_dellink
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
void rpl_ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
#else
void rpl_ip_tunnel_dellink(struct net_device *dev);
#endif

#define ip_tunnel_init_net rpl_ip_tunnel_init_net
int rpl_ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
			   struct rtnl_link_ops *ops, char *devname);

#define ip_tunnel_delete_net rpl_ip_tunnel_delete_net
void rpl_ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);

#define ip_tunnel_setup rpl_ip_tunnel_setup
void rpl_ip_tunnel_setup(struct net_device *dev, int net_id);

#define ip_tunnel_get_iflink rpl_ip_tunnel_get_iflink
int rpl_ip_tunnel_get_iflink(const struct net_device *dev);

#define ip_tunnel_get_link_net rpl_ip_tunnel_get_link_net
struct net *rpl_ip_tunnel_get_link_net(const struct net_device *dev);
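
/*
 * Illustrative sketch (not part of the original header): because of the
 * renames above, compat code calls the upstream names and transparently
 * reaches the rpl_ backports. The helper name is hypothetical.
 */
static inline int example_newlink(struct net_device *dev, struct nlattr *tb[],
				  struct ip_tunnel_parm *parms)
{
	/* Expands to rpl_ip_tunnel_newlink() on compat kernels. */
	return ip_tunnel_newlink(dev, tb, parms);
}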

#endif /* USE_UPSTREAM_TUNNEL */

#ifndef HAVE___IP_TUNNEL_CHANGE_MTU
#define __ip_tunnel_change_mtu rpl___ip_tunnel_change_mtu
int rpl___ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
#endif

static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}

#endif /* __NET_IP_TUNNELS_WRAPPER_H */