datapath: Fix net exit.
[cascardo/ovs.git] / datapath / vport-gre.c
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/kconfig.h>
#if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/route.h>
#include <net/xfrm.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/gre.h>
#include <net/protocol.h>

#include "datapath.h"
#include "vport.h"

/* Returns the least-significant 32 bits of a __be64. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)x;
#else
        return (__force __be32)((__force u64)x >> 32);
#endif
}

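/* Plain GRE encodes only the checksum and key options, so mask out every
 * other tunnel flag before building a header or reporting a received one.
 */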
static __be16 filter_tnl_flags(__be16 flags)
{
        return flags & (TUNNEL_CSUM | TUNNEL_KEY);
}

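/* Push a GRE header onto @skb for the egress tunnel key, handling checksum
 * offload first.  Returns the (possibly reallocated) skb or an ERR_PTR().
 */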
static struct sk_buff *__build_header(struct sk_buff *skb,
                                      int tunnel_hlen,
                                      __be32 seq, __be16 gre64_flag)
{
        const struct ovs_key_ipv4_tunnel *tun_key;
        struct tnl_ptk_info tpi;

        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
        skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
                return skb;

        tpi.flags = filter_tnl_flags(tun_key->tun_flags) | gre64_flag;

        tpi.proto = htons(ETH_P_TEB);
        tpi.key = be64_get_low32(tun_key->tun_id);
        tpi.seq = seq;
        gre_build_header(skb, &tpi, tunnel_hlen);

        return skb;
}

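/* Rebuild a 64-bit tunnel ID from a received GRE key (low 32 bits) and
 * sequence number (high 32 bits), the inverse of the split done on transmit
 * by the GRE64 vport.
 */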
static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
{
#ifdef __BIG_ENDIAN
        return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
#else
        return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
#endif
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb,
                   const struct tnl_ptk_info *tpi)
{
        struct ovs_tunnel_info tun_info;
        struct ovs_net *ovs_net;
        struct vport *vport;
        __be64 key;

        ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
        if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
                vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
        else
                vport = rcu_dereference(ovs_net->vport_net.gre_vport);
        if (unlikely(!vport))
                return PACKET_REJECT;

        key = key_to_tunnel_id(tpi->key, tpi->seq);
        ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
                               filter_tnl_flags(tpi->flags), NULL, 0);

        ovs_vport_receive(vport, skb, &tun_info);
        return PACKET_RCVD;
}

/* Called with rcu_read_lock and BH disabled. */
static int gre_err(struct sk_buff *skb, u32 info,
                   const struct tnl_ptk_info *tpi)
{
        struct ovs_net *ovs_net;
        struct vport *vport;

        ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
        if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
                vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
        else
                vport = rcu_dereference(ovs_net->vport_net.gre_vport);

        if (unlikely(!vport))
                return PACKET_REJECT;
        else
                return PACKET_RCVD;
}

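/* Common transmit path for the gre and gre64 vports: look up a route to the
 * tunnel destination, ensure sufficient headroom, push any VLAN tag and the
 * GRE header, then hand the packet to iptunnel_xmit().
 */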
static int __send(struct vport *vport, struct sk_buff *skb,
                  int tunnel_hlen,
                  __be32 seq, __be16 gre64_flag)
{
        struct ovs_key_ipv4_tunnel *tun_key;
        struct rtable *rt;
        int min_headroom;
        __be16 df;
        __be32 saddr;
        int err;

        /* Route lookup */
        tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
        saddr = tun_key->ipv4_src;
        rt = find_route(ovs_dp_get_net(vport->dp),
                        &saddr, tun_key->ipv4_dst,
                        IPPROTO_GRE, tun_key->ipv4_tos,
                        skb->mark);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
        }

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + tunnel_hlen + sizeof(struct iphdr)
                        + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);
                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        if (skb_vlan_tag_present(skb)) {
                if (unlikely(!vlan_insert_tag_set_proto(skb,
                                                        skb->vlan_proto,
                                                        skb_vlan_tag_get(skb)))) {
                        err = -ENOMEM;
                        skb = NULL;
                        goto err_free_rt;
                }
                vlan_set_tci(skb, 0);
        }

        /* Push Tunnel header. */
        skb = __build_header(skb, tunnel_hlen, seq, gre64_flag);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                skb = NULL;
                goto err_free_rt;
        }

        df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
        skb->ignore_df = 1;

        return iptunnel_xmit(skb->sk, rt, skb, saddr,
                             tun_key->ipv4_dst, IPPROTO_GRE,
                             tun_key->ipv4_tos,
                             tun_key->ipv4_ttl, df, false);
err_free_rt:
        ip_rt_put(rt);
error:
        kfree_skb(skb);
        return err;
}

static struct gre_cisco_protocol gre_protocol = {
        .handler        = gre_rcv,
        .err_handler    = gre_err,
        .priority       = 1,
};

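/* The GRE demux handler is shared by all gre and gre64 vports, so keep a
 * reference count and only register it for the first port and unregister it
 * when the last one goes away.
 */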
static int gre_ports;
static int gre_init(void)
{
        int err;

        gre_ports++;
        if (gre_ports > 1)
                return 0;

        err = gre_cisco_register(&gre_protocol);
        if (err)
                pr_warn("cannot register gre protocol handler\n");

        return err;
}

static void gre_exit(void)
{
        gre_ports--;
        if (gre_ports > 0)
                return;

        gre_cisco_unregister(&gre_protocol);
}

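/* The port name is stored in the vport private area at creation time. */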
static const char *gre_get_name(const struct vport *vport)
{
        return vport_priv(vport);
}

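/* Create the single GRE vport allowed per net namespace and publish it for
 * the receive path.
 */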
static struct vport *gre_create(const struct vport_parms *parms)
{
        struct net *net = ovs_dp_get_net(parms->dp);
        struct ovs_net *ovs_net;
        struct vport *vport;
        int err;

        err = gre_init();
        if (err)
                return ERR_PTR(err);

        ovs_net = net_generic(net, ovs_net_id);
        if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
                vport = ERR_PTR(-EEXIST);
                goto error;
        }

        vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
        if (IS_ERR(vport))
                goto error;

        strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
        rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
        return vport;

error:
        gre_exit();
        return vport;
}

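/* Unpublish the vport from the receive path and free it after an RCU grace
 * period.
 */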
static void gre_tnl_destroy(struct vport *vport)
{
        struct net *net = ovs_dp_get_net(vport->dp);
        struct ovs_net *ovs_net;

        ovs_net = net_generic(net, ovs_net_id);

        RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
        ovs_vport_deferred_free(vport);
        gre_exit();
}

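/* Transmit path for the plain GRE vport: the header length depends only on
 * the key and checksum flags of the egress tunnel info.
 */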
static int gre_send(struct vport *vport, struct sk_buff *skb)
{
        int hlen;

        if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hlen = ip_gre_calc_hlen(OVS_CB(skb)->egress_tun_info->tunnel.tun_flags);

        return __send(vport, skb, hlen, 0, 0);
}

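/* Fill in @egress_tun_info with the tunnel parameters this vport would use
 * to send @skb.
 */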
static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
                                   struct ovs_tunnel_info *egress_tun_info)
{
        return ovs_tunnel_get_egress_info(egress_tun_info,
                                          ovs_dp_get_net(vport->dp),
                                          OVS_CB(skb)->egress_tun_info,
                                          IPPROTO_GRE, skb->mark, 0, 0);
}

const struct vport_ops ovs_gre_vport_ops = {
        .type                   = OVS_VPORT_TYPE_GRE,
        .create                 = gre_create,
        .destroy                = gre_tnl_destroy,
        .get_name               = gre_get_name,
        .send                   = gre_send,
        .get_egress_tun_info    = gre_get_egress_tun_info,
};

/* GRE64 vport. */
static struct vport *gre64_create(const struct vport_parms *parms)
{
        struct net *net = ovs_dp_get_net(parms->dp);
        struct ovs_net *ovs_net;
        struct vport *vport;
        int err;

        err = gre_init();
        if (err)
                return ERR_PTR(err);

        ovs_net = net_generic(net, ovs_net_id);
        if (ovsl_dereference(ovs_net->vport_net.gre64_vport)) {
                vport = ERR_PTR(-EEXIST);
                goto error;
        }

        vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre64_vport_ops, parms);
        if (IS_ERR(vport))
                goto error;

        strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
        rcu_assign_pointer(ovs_net->vport_net.gre64_vport, vport);
        return vport;
error:
        gre_exit();
        return vport;
}

static void gre64_tnl_destroy(struct vport *vport)
{
        struct net *net = ovs_dp_get_net(vport->dp);
        struct ovs_net *ovs_net;

        ovs_net = net_generic(net, ovs_net_id);

        rcu_assign_pointer(ovs_net->vport_net.gre64_vport, NULL);
        ovs_vport_deferred_free(vport);
        gre_exit();
}

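/* Returns the most-significant 32 bits of a __be64. */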
static __be32 be64_get_high32(__be64 x)
{
#ifdef __BIG_ENDIAN
        return (__force __be32)((__force u64)x >> 32);
#else
        return (__force __be32)x;
#endif
}

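/* Transmit path for the GRE64 vport: the upper 32 bits of the tunnel ID are
 * carried in the GRE sequence number field, so the key and sequence sections
 * are always present in the header.
 */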
static int gre64_send(struct vport *vport, struct sk_buff *skb)
{
        int hlen = GRE_HEADER_SECTION +         /* GRE Hdr */
                   GRE_HEADER_SECTION +         /* GRE Key */
                   GRE_HEADER_SECTION;          /* GRE SEQ */
        __be32 seq;

        if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        if (OVS_CB(skb)->egress_tun_info->tunnel.tun_flags & TUNNEL_CSUM)
                hlen += GRE_HEADER_SECTION;

        seq = be64_get_high32(OVS_CB(skb)->egress_tun_info->tunnel.tun_id);
        return __send(vport, skb, hlen, seq, (TUNNEL_KEY|TUNNEL_SEQ));
}

const struct vport_ops ovs_gre64_vport_ops = {
        .type                   = OVS_VPORT_TYPE_GRE64,
        .create                 = gre64_create,
        .destroy                = gre64_tnl_destroy,
        .get_name               = gre_get_name,
        .send                   = gre64_send,
        .get_egress_tun_info    = gre_get_egress_tun_info,
};
#endif