/*
 * GRE over IPv6 protocol decoder.
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/if_tunnel.h>
#include <linux/ip6_tunnel.h>

#include <net/protocol.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>

#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20

#define HASH_SIZE_SHIFT  5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
static int ip6gre_net_id __read_mostly;
struct ip6gre_net {
        struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];

        struct net_device *fb_tunnel_dev;
};
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
/* Tunnel hash table */

/*
 * We require an exact key match, i.e. if a key is present in the packet
 * it will match only a tunnel with the same key; if it is not present,
 * it will match only a keyless tunnel.
 *
 * All keyless packets, if not matched against a configured keyless tunnel,
 * will match the fallback tunnel.
 */
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))

static u32 HASH_ADDR(const struct in6_addr *addr)
{
        u32 hash = ipv6_addr_hash(addr);

        return hash_32(hash, HASH_SIZE_SHIFT);
}
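/*
 * Illustrative note (added; not part of the original source): with
 * HASH_SIZE_SHIFT == 5 each hash table has 32 buckets, so HASH_KEY()
 * folds a 32-bit GRE key down to 5 bits, e.g. key 0x00000013 maps to
 * (0x13 ^ 0x01) & 0x1f == 0x12, and HASH_ADDR() reduces an IPv6
 * address hash to the same 0..31 range via hash_32().
 */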
#define tunnels_r_l     tunnels[3]
#define tunnels_r       tunnels[2]
#define tunnels_l       tunnels[1]
#define tunnels_wc      tunnels[0]

/*
 * Locking : hash tables are protected by RCU and RTNL
 */

#define for_each_ip_tunnel_rcu(start) \
        for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
/* often modified stats are per cpu, others are shared (netdev->stats) */
struct pcpu_tstats {
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync syncp;
};
static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
                struct rtnl_link_stats64 *tot)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
        }

        tot->multicast = dev->stats.multicast;
        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_errors = dev->stats.rx_errors;
        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
        tot->tx_errors = dev->stats.tx_errors;

        return tot;
}
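/*
 * Background note (added; not in the original source): the u64_stats_sync
 * sequence counter only does real work on 32-bit SMP kernels, where it keeps
 * the 64-bit per-cpu counters from being read torn while they are summed
 * above; on 64-bit builds the fetch_begin/fetch_retry helpers compile away.
 */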
/* Given src, dst and key, find the appropriate tunnel for the incoming packet. */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
                const struct in6_addr *remote, const struct in6_addr *local,
                __be32 key, __be16 gre_proto)
{
        struct net *net = dev_net(dev);
        int link = dev->ifindex;
        unsigned int h0 = HASH_ADDR(remote);
        unsigned int h1 = HASH_KEY(key);
        struct ip6_tnl *t, *cand = NULL;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
                       ARPHRD_ETHER : ARPHRD_IP6GRE;
        int score, cand_score = 4;

        for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
                if (!ipv6_addr_equal(local, &t->parms.laddr) ||
                    !ipv6_addr_equal(remote, &t->parms.raddr) ||
                    key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
                if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
                    key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
                if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
                     (!ipv6_addr_equal(local, &t->parms.raddr) ||
                      !ipv6_addr_is_multicast(local))) ||
                    key != t->parms.i_key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
                if (t->parms.i_key != key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->dev->type != ARPHRD_IP6GRE &&
                    t->dev->type != dev_type)
                        continue;

                score = 0;
                if (t->parms.link != link)
                        score |= 1;
                if (t->dev->type != dev_type)
                        score |= 2;
                if (score == 0)
                        return t;

                if (score < cand_score) {
                        cand = t;
                        cand_score = score;
                }
        }

        if (cand != NULL)
                return cand;

        dev = ign->fb_tunnel_dev;
        if (dev->flags & IFF_UP)
                return netdev_priv(dev);

        return NULL;
}
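/*
 * Summary comment (added for clarity; not in the original source): the
 * receive path tries the four tables from most to least specific --
 * (remote,local), (remote,*), (*,local), (*,*) -- always requiring the
 * GRE key to match.  An entry whose link and device type also match is
 * returned immediately; otherwise the best-scoring candidate is kept,
 * and if nothing matches the packet falls back to the fallback device.
 */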
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
                const struct __ip6_tnl_parm *p)
{
        const struct in6_addr *remote = &p->raddr;
        const struct in6_addr *local = &p->laddr;
        unsigned int h = HASH_KEY(p->i_key);
        int prio = 0;

        if (!ipv6_addr_any(local))
                prio |= 1;
        if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
                prio |= 2;
                h ^= HASH_ADDR(remote);
        }

        return &ign->tunnels[prio][h];
}
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
                const struct ip6_tnl *t)
{
        return __ip6gre_bucket(ign, &t->parms);
}

static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

        rcu_assign_pointer(t->next, rtnl_dereference(*tp));
        rcu_assign_pointer(*tp, t);
}
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
        struct ip6_tnl __rcu **tp;
        struct ip6_tnl *iter;

        for (tp = ip6gre_bucket(ign, t);
             (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next) {
                if (t == iter) {
                        rcu_assign_pointer(*tp, t->next);
                        break;
                }
        }
}
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
                                          const struct __ip6_tnl_parm *parms,
                                          int type)
{
        const struct in6_addr *remote = &parms->raddr;
        const struct in6_addr *local = &parms->laddr;
        __be32 key = parms->i_key;
        int link = parms->link;
        struct ip6_tnl *t;
        struct ip6_tnl __rcu **tp;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        for (tp = __ip6gre_bucket(ign, parms);
             (t = rtnl_dereference(*tp)) != NULL;
             tp = &t->next)
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
                    ipv6_addr_equal(remote, &t->parms.raddr) &&
                    key == t->parms.i_key &&
                    link == t->parms.link &&
                    type == t->dev->type)
                        break;

        return t;
}
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
                const struct __ip6_tnl_parm *parms, int create)
        struct ip6_tnl *t, *nt;
        struct net_device *dev;
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);

                strlcpy(name, parms->name, IFNAMSIZ);
                strcpy(name, "ip6gre%d");

        dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);

        dev_net_set(dev, net);

        nt = netdev_priv(dev);
        dev->rtnl_link_ops = &ip6gre_link_ops;

        ip6gre_tnl_link_config(nt, 1);

        if (register_netdevice(dev) < 0)

        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & GRE_SEQ))
                dev->features |= NETIF_F_LLTX;

        ip6gre_tunnel_link(ign, nt);
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        ip6gre_tunnel_unlink(ign, netdev_priv(dev));
        dev_put(dev);
}
static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                u8 type, u8 code, int offset, __be32 info)
        const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
        __be16 *p = (__be16 *)(skb->data + offset);
        int grehlen = offset + 4;

        if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
                if (flags&(GRE_VERSION|GRE_ROUTING))

        /* If only 8 bytes returned, keyed message will be dropped here */
        if (!pskb_may_pull(skb, grehlen))
        ipv6h = (const struct ipv6hdr *)skb->data;
        p = (__be16 *)(skb->data + offset);

        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
                                 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,

        struct ipv6_tlv_tnl_enc_lim *tel;

        case ICMPV6_DEST_UNREACH:
                net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
        case ICMPV6_TIME_EXCEED:
                if (code == ICMPV6_EXC_HOPLIMIT) {
                        net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
        case ICMPV6_PARAMPROB:
                if (code == ICMPV6_HDR_FIELD)
                        teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

                if (teli && teli == info - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
                        net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
        case ICMPV6_PKT_TOOBIG:
                if (mtu < IPV6_MIN_MTU)

        if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
        t->err_time = jiffies;
static inline void ip6gre_ecn_decapsulate_ipv4(const struct ip6_tnl *t,
                const struct ipv6hdr *ipv6h, struct sk_buff *skb)
{
        __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
                ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

        if (INET_ECN_is_ce(dsfield))
                IP_ECN_set_ce(ip_hdr(skb));
}

static inline void ip6gre_ecn_decapsulate_ipv6(const struct ip6_tnl *t,
                const struct ipv6hdr *ipv6h, struct sk_buff *skb)
{
        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
                ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

        if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
                IP6_ECN_set_ce(ipv6_hdr(skb));
}
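/*
 * Note (added; not in the original source): per the usual ECN tunnelling
 * rules, a Congestion Experienced mark on the outer IPv6 header is copied
 * to the decapsulated inner IPv4 or IPv6 header, and the inner DSCP can
 * optionally be overwritten from the outer header when
 * IP6_TNL_F_RCV_DSCP_COPY is set on the tunnel.
 */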
static int ip6gre_rcv(struct sk_buff *skb)
        const struct ipv6hdr *ipv6h;
        struct ip6_tnl *tunnel;

        if (!pskb_may_pull(skb, sizeof(struct in6_addr)))

        ipv6h = ipv6_hdr(skb);
        flags = *(__be16 *)h;

        if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
                /* - Version must be 0.
                 * - We do not support routing headers.
                 */
                if (flags&(GRE_VERSION|GRE_ROUTING))

                if (flags&GRE_CSUM) {
                        switch (skb->ip_summed) {
                        case CHECKSUM_COMPLETE:
                                csum = csum_fold(skb->csum);
                                csum = __skb_checksum_complete(skb);
                                skb->ip_summed = CHECKSUM_COMPLETE;

                key = *(__be32 *)(h + offset);
                seqno = ntohl(*(__be32 *)(h + offset));

        gre_proto = *(__be16 *)(h + 2);
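        /*
         * Layout note (added; not part of the original source): the base GRE
         * header parsed above is flags/version (2 bytes) followed by the
         * protocol type (2 bytes); the optional checksum+reserved, key and
         * sequence number words (4 bytes each, RFC 2784/2890) follow in that
         * order, which is why 'offset' is advanced past each present field.
         */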
        tunnel = ip6gre_tunnel_lookup(skb->dev,
                                      &ipv6h->saddr, &ipv6h->daddr, key,
                struct pcpu_tstats *tstats;

                if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))

                if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
                        tunnel->dev->stats.rx_dropped++;

                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
                 * - Change protocol to IP
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
                        skb->protocol = htons(ETH_P_IP);
                        if ((*(h + offset) & 0xF0) != 0x40)

                skb->mac_header = skb->network_header;
                __pskb_pull(skb, offset);
                skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
                skb->pkt_type = PACKET_HOST;

                if (((flags&GRE_CSUM) && csum) ||
                    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
                        tunnel->dev->stats.rx_crc_errors++;
                        tunnel->dev->stats.rx_errors++;

                if (tunnel->parms.i_flags&GRE_SEQ) {
                        if (!(flags&GRE_SEQ) ||
                            (s32)(seqno - tunnel->i_seqno) < 0)) {
                                tunnel->dev->stats.rx_fifo_errors++;
                                tunnel->dev->stats.rx_errors++;

                        tunnel->i_seqno = seqno + 1;

                /* Warning: All skb pointers will be invalidated! */
                if (tunnel->dev->type == ARPHRD_ETHER) {
                        if (!pskb_may_pull(skb, ETH_HLEN)) {
                                tunnel->dev->stats.rx_length_errors++;
                                tunnel->dev->stats.rx_errors++;

                        ipv6h = ipv6_hdr(skb);
                        skb->protocol = eth_type_trans(skb, tunnel->dev);
                        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

                tstats = this_cpu_ptr(tunnel->dev->tstats);
                u64_stats_update_begin(&tstats->syncp);
                tstats->rx_packets++;
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);

                __skb_tunnel_rx(skb, tunnel->dev);

                skb_reset_network_header(skb);
                if (skb->protocol == htons(ETH_P_IP))
                        ip6gre_ecn_decapsulate_ipv4(tunnel, ipv6h, skb);
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ip6gre_ecn_decapsulate_ipv6(tunnel, ipv6h, skb);

        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
struct ipv6_tel_txoption {
        struct ipv6_txoptions ops;
        __u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
        memset(opt, 0, sizeof(struct ipv6_tel_txoption));
        opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
        opt->dst_opt[3] = 1;
        opt->dst_opt[4] = encap_limit;
        opt->dst_opt[5] = IPV6_TLV_PADN;
        opt->dst_opt[6] = 1;

        opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
        opt->ops.opt_nflen = 8;
}
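/*
 * Layout note (added; not part of the original source): dst_opt[] forms an
 * 8-byte IPv6 destination options header: byte 0 is the next-header field
 * (filled in later), byte 1 the extension length (0, i.e. 8 bytes total),
 * bytes 2-4 the tunnel encapsulation limit TLV (type, length 1, value) from
 * RFC 2473, and bytes 5-7 a PadN option padding the header out to 8 bytes.
 */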
static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
                                struct net_device *dev,
        struct net *net = dev_net(dev);
        struct ip6_tnl *tunnel = netdev_priv(dev);
        struct net_device *tdev;                /* Device to other host */
        struct ipv6hdr *ipv6h;                  /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        struct ipv6_tel_txoption opt;
        struct dst_entry *dst = NULL, *ndst = NULL;
        struct net_device_stats *stats = &tunnel->dev->stats;
        struct sk_buff *new_skb;

        if (dev->type == ARPHRD_ETHER)
                IPCB(skb)->flags = 0;

        if (dev->header_ops && dev->type == ARPHRD_IP6GRE) {
                ipv6h = (struct ipv6hdr *)skb->data;
                fl6->daddr = ipv6h->daddr;
                gre_hlen = tunnel->hlen;
                fl6->daddr = tunnel->parms.raddr;

        if (!fl6->flowi6_mark)
                dst = ip6_tnl_dst_check(tunnel);

                ndst = ip6_route_output(net, NULL, fl6);
                        goto tx_err_link_failure;
                ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
                        goto tx_err_link_failure;

                net_warn_ratelimited("%s: Local routing loop detected!\n",
                goto tx_err_dst_release;

        mtu = dst_mtu(dst) - sizeof(*ipv6h);
        if (encap_limit >= 0) {
        if (mtu < IPV6_MIN_MTU)
        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
        if (skb->len > mtu) {
                goto tx_err_dst_release;

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) {
                        dst_link_failure(skb);
                        tunnel->err_count = 0;

        max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;

        if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
                new_skb = skb_realloc_headroom(skb, max_headroom);
                if (max_headroom > dev->needed_headroom)
                        dev->needed_headroom = max_headroom;
                        goto tx_err_dst_release;

                skb_set_owner_w(new_skb, skb->sk);

        if (fl6->flowi6_mark) {
                skb_dst_set(skb, dst);
                skb_dst_set_noref(skb, dst);

        skb->transport_header = skb->network_header;

        if (encap_limit >= 0) {
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);

        skb_push(skb, gre_hlen);
        skb_reset_network_header(skb);

        /*
         * Push down and install the IP header.
         */
        ipv6h = ipv6_hdr(skb);
        *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
        dsfield = INET_ECN_encapsulate(0, dsfield);
        ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
        ipv6h->hop_limit = tunnel->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
        ipv6h->daddr = fl6->daddr;

        ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
        ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
                                     htons(ETH_P_TEB) : skb->protocol;

        if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
                __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);

                if (tunnel->parms.o_flags&GRE_SEQ) {
                        *ptr = htonl(tunnel->o_seqno);
                if (tunnel->parms.o_flags&GRE_KEY) {
                        *ptr = tunnel->parms.o_key;
                if (tunnel->parms.o_flags&GRE_CSUM) {
                        *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
                                        skb->len - sizeof(struct ipv6hdr));

        err = ip6_local_out(skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);

                tstats->tx_bytes += pkt_len;
                tstats->tx_packets++;
                stats->tx_aborted_errors++;

        ip6_tnl_dst_store(tunnel, ndst);

        stats->tx_carrier_errors++;
        dst_link_failure(skb);
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        const struct iphdr *iph = ip_hdr(skb);
        int encap_limit = -1;

        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;

        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_IPIP;

        dsfield = ipv4_get_dsfield(iph);

        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
                fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;
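        /*
         * Worked example (added; not part of the original source): with
         * IPV6_TCLASS_SHIFT == 20, an IPv4 TOS of 0xb8 is shifted into the
         * traffic-class bits of the IPv6 flowinfo word, i.e. the expression
         * above contributes htonl(0x0b800000), which IPV6_TCLASS_MASK then
         * confines to the traffic-class field.
         */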
        err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);

        /* XXX: send ICMP error even if DF is not set. */
        if (err == -EMSGSIZE)
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int encap_limit = -1;

        if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))

        offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
                struct ipv6_tlv_tnl_enc_lim *tel;

                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
                if (tel->encap_limit == 0) {
                        icmpv6_send(skb, ICMPV6_PARAMPROB,
                                    ICMPV6_HDR_FIELD, offset + 2);
                encap_limit = tel->encap_limit - 1;
        } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;

        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_IPV6;

        dsfield = ipv6_get_dsfield(ipv6h);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
                fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
                fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
        if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
                fl6.flowi6_mark = skb->mark;

        err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);

        if (err == -EMSGSIZE)
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
/**
 * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loops by checking that the tunnel exit-point
 *   does not match the source of the incoming packet.
 **/
static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
        const struct ipv6hdr *hdr)
{
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        int encap_limit = -1;

        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;

        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
        fl6.flowi6_proto = skb->protocol;

        err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->dev->stats;

        if (!ip6_tnl_xmit_ctl(t))

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                ret = ip6gre_xmit_ipv4(skb, dev);
        case htons(ETH_P_IPV6):
                ret = ip6gre_xmit_ipv6(skb, dev);
                ret = ip6gre_xmit_other(skb, dev);

        return NETDEV_TX_OK;

        stats->tx_dropped++;
        return NETDEV_TX_OK;
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        struct net_device *dev = t->dev;
        struct __ip6_tnl_parm *p = &t->parms;
        struct flowi6 *fl6 = &t->fl.u.ip6;
        int addend = sizeof(struct ipv6hdr) + 4;

        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
                memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

        /* Set up flowi template */
        fl6->saddr = p->laddr;
        fl6->daddr = p->raddr;
        fl6->flowi6_oif = p->link;

        if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
                fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
        if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
                fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

        p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
        p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

        if (p->flags&IP6_TNL_F_CAP_XMIT &&
            p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
                dev->flags |= IFF_POINTOPOINT;
                dev->flags &= ~IFF_POINTOPOINT;

        dev->iflink = p->link;

        /* Precalculate GRE options length */
        if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (t->parms.o_flags&GRE_CSUM)
                if (t->parms.o_flags&GRE_KEY)
                if (t->parms.o_flags&GRE_SEQ)

        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

                struct rt6_info *rt = rt6_lookup(dev_net(dev),
                                                 &p->raddr, &p->laddr,

                dev->hard_header_len = rt->dst.dev->hard_header_len + addend;

                        dev->mtu = rt->dst.dev->mtu - addend;
                        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))

                        if (dev->mtu < IPV6_MIN_MTU)
                                dev->mtu = IPV6_MIN_MTU;

                dst_release(&rt->dst);
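        /*
         * Overhead example (added; not part of the original source): the
         * per-packet cost is the 40-byte IPv6 header plus the 4-byte base GRE
         * header, plus 4 bytes each for checksum, key and sequence number
         * when those options are enabled.  Over a 1500-byte link a
         * fully-optioned tunnel therefore ends up with dev->mtu == 1444, and
         * another 8 bytes are reserved when the encapsulation-limit
         * destination option is in use.
         */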
static int ip6gre_tnl_change(struct ip6_tnl *t,
        const struct __ip6_tnl_parm *p, int set_mtu)
{
        t->parms.laddr = p->laddr;
        t->parms.raddr = p->raddr;
        t->parms.flags = p->flags;
        t->parms.hop_limit = p->hop_limit;
        t->parms.encap_limit = p->encap_limit;
        t->parms.flowinfo = p->flowinfo;
        t->parms.link = p->link;
        t->parms.proto = p->proto;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        t->parms.i_flags = p->i_flags;
        t->parms.o_flags = p->o_flags;
        ip6_tnl_dst_reset(t);
        ip6gre_tnl_link_config(t, set_mtu);
        return 0;
}
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
        const struct ip6_tnl_parm2 *u)
{
        p->laddr = u->laddr;
        p->raddr = u->raddr;
        p->flags = u->flags;
        p->hop_limit = u->hop_limit;
        p->encap_limit = u->encap_limit;
        p->flowinfo = u->flowinfo;
        p->link = u->link;
        p->i_key = u->i_key;
        p->o_key = u->o_key;
        p->i_flags = u->i_flags;
        p->o_flags = u->o_flags;
        memcpy(p->name, u->name, sizeof(u->name));
}

static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
        const struct __ip6_tnl_parm *p)
{
        u->proto = IPPROTO_GRE;
        u->laddr = p->laddr;
        u->raddr = p->raddr;
        u->flags = p->flags;
        u->hop_limit = p->hop_limit;
        u->encap_limit = p->encap_limit;
        u->flowinfo = p->flowinfo;
        u->link = p->link;
        u->i_key = p->i_key;
        u->o_key = p->o_key;
        u->i_flags = p->i_flags;
        u->o_flags = p->o_flags;
        memcpy(u->name, p->name, sizeof(u->name));
}
static int ip6gre_tunnel_ioctl(struct net_device *dev,
        struct ifreq *ifr, int cmd)
        struct ip6_tnl_parm2 p;
        struct __ip6_tnl_parm p1;
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

                if (dev == ign->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
                        t = netdev_priv(dev);
                ip6gre_tnl_parm_to_user(&p, &t->parms);
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))

                if (!capable(CAP_NET_ADMIN))

                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))

                if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))

                if (!(p.i_flags&GRE_KEY))
                if (!(p.o_flags&GRE_KEY))

                ip6gre_tnl_parm_from_user(&p1, &p);
                t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

                if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                                if (t->dev != dev) {
                                t = netdev_priv(dev);

                                ip6gre_tunnel_unlink(ign, t);
                                ip6gre_tnl_change(t, &p1, 1);
                                ip6gre_tunnel_link(ign, t);
                                netdev_state_change(dev);

                        ip6gre_tnl_parm_to_user(&p, &t->parms);
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);

                if (!capable(CAP_NET_ADMIN))

                if (dev == ign->fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        ip6gre_tnl_parm_from_user(&p1, &p);
                        t = ip6gre_tunnel_locate(net, &p1, 0);
                        if (t == netdev_priv(ign->fb_tunnel_dev))
                unregister_netdevice(dev);
static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ip6_tnl *tunnel = netdev_priv(dev);

        if (new_mtu < 68 ||
            new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
                        unsigned short type,
                        const void *daddr, const void *saddr, unsigned int len)
        struct ip6_tnl *t = netdev_priv(dev);
        struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
        __be16 *p = (__be16 *)(ipv6h+1);

        *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
        ipv6h->daddr = t->parms.raddr;

        p[0] = t->parms.o_flags;

        /*
         * Set the source hardware address.
         */
                memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
                memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
        if (!ipv6_addr_any(&ipv6h->daddr))
static int ip6gre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
        const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb_mac_header(skb);

        memcpy(haddr, &ipv6h->saddr, sizeof(struct in6_addr));
        return sizeof(struct in6_addr);
}
static const struct header_ops ip6gre_header_ops = {
        .create = ip6gre_header,
        .parse  = ip6gre_header_parse,
};

static const struct net_device_ops ip6gre_netdev_ops = {
        .ndo_init        = ip6gre_tunnel_init,
        .ndo_uninit      = ip6gre_tunnel_uninit,
        .ndo_start_xmit  = ip6gre_tunnel_xmit,
        .ndo_do_ioctl    = ip6gre_tunnel_ioctl,
        .ndo_change_mtu  = ip6gre_tunnel_change_mtu,
        .ndo_get_stats64 = ip6gre_get_stats64,
};
static void ip6gre_dev_free(struct net_device *dev)
{
        free_percpu(dev->tstats);
        free_netdev(dev);
}
static void ip6gre_tunnel_setup(struct net_device *dev)
{
        struct ip6_tnl *t;

        dev->netdev_ops = &ip6gre_netdev_ops;
        dev->destructor = ip6gre_dev_free;

        dev->type = ARPHRD_IP6GRE;
        dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
        dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
        t = netdev_priv(dev);
        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
        dev->flags |= IFF_NOARP;
        dev->iflink = 0;
        dev->addr_len = sizeof(struct in6_addr);
        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
static int ip6gre_tunnel_init(struct net_device *dev)
{
        struct ip6_tnl *tunnel;

        tunnel = netdev_priv(dev);
        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));

        if (ipv6_addr_any(&tunnel->parms.raddr))
                dev->header_ops = &ip6gre_header_ops;

        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;

        return 0;
}
static void ip6gre_fb_tunnel_init(struct net_device *dev)
{
        struct ip6_tnl *tunnel = netdev_priv(dev);

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        tunnel->hlen = sizeof(struct ipv6hdr) + 4;

        dev_hold(dev);
}

static struct inet6_protocol ip6gre_protocol __read_mostly = {
        .handler     = ip6gre_rcv,
        .err_handler = ip6gre_err,
        .flags       = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static void ip6gre_destroy_tunnels(struct ip6gre_net *ign,
                                   struct list_head *head)
        for (prio = 0; prio < 4; prio++) {
                for (h = 0; h < HASH_SIZE; h++) {
                        t = rtnl_dereference(ign->tunnels[prio][h]);
                                unregister_netdevice_queue(t->dev, head);
                                t = rtnl_dereference(t->next);
static int __net_init ip6gre_init_net(struct net *net)
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
                                          ip6gre_tunnel_setup);
        if (!ign->fb_tunnel_dev) {

        dev_net_set(ign->fb_tunnel_dev, net);

        ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
        ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

        err = register_netdev(ign->fb_tunnel_dev);

        rcu_assign_pointer(ign->tunnels_wc[0],
                           netdev_priv(ign->fb_tunnel_dev));

        ip6gre_dev_free(ign->fb_tunnel_dev);
static void __net_exit ip6gre_exit_net(struct net *net)
{
        struct ip6gre_net *ign;
        LIST_HEAD(list);

        ign = net_generic(net, ip6gre_net_id);
        rtnl_lock();
        ip6gre_destroy_tunnels(ign, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}

static struct pernet_operations ip6gre_net_ops = {
        .init = ip6gre_init_net,
        .exit = ip6gre_exit_net,
        .id   = &ip6gre_net_id,
        .size = sizeof(struct ip6gre_net),
};
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
        if (data[IFLA_GRE_IFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
        if (data[IFLA_GRE_OFLAGS])
                flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
        if (flags & (GRE_VERSION|GRE_ROUTING))
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
        struct in6_addr daddr;

        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;

        if (data[IFLA_GRE_REMOTE]) {
                nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));
                if (ipv6_addr_any(&daddr))

        return ip6gre_tunnel_validate(tb, data);
static void ip6gre_netlink_parms(struct nlattr *data[],
                                struct __ip6_tnl_parm *parms)
{
        memset(parms, 0, sizeof(*parms));

        if (!data)
                return;

        if (data[IFLA_GRE_LINK])
                parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

        if (data[IFLA_GRE_IFLAGS])
                parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

        if (data[IFLA_GRE_OFLAGS])
                parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

        if (data[IFLA_GRE_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

        if (data[IFLA_GRE_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

        if (data[IFLA_GRE_LOCAL])
                nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr));

        if (data[IFLA_GRE_REMOTE])
                nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr));

        if (data[IFLA_GRE_TTL])
                parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);

        if (data[IFLA_GRE_ENCAP_LIMIT])
                parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);

        if (data[IFLA_GRE_FLOWINFO])
                parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);

        if (data[IFLA_GRE_FLAGS])
                parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
}
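/*
 * Usage sketch (added; not part of the kernel source): with iproute2 these
 * attributes are typically set from the command line, for example
 *
 *   ip link add name ip6gre1 type ip6gre \
 *          local 2001:db8::1 remote 2001:db8::2 ttl 64 key 42
 *
 * which arrives here as IFLA_GRE_LOCAL/REMOTE/TTL and IFLA_GRE_IKEY/OKEY
 * netlink attributes.
 */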
static int ip6gre_tap_init(struct net_device *dev)
        struct ip6_tnl *tunnel;

        tunnel = netdev_priv(dev);

        strcpy(tunnel->parms.name, dev->name);

        ip6gre_tnl_link_config(tunnel, 1);

        dev->tstats = alloc_percpu(struct pcpu_tstats);
static const struct net_device_ops ip6gre_tap_netdev_ops = {
        .ndo_init            = ip6gre_tap_init,
        .ndo_uninit          = ip6gre_tunnel_uninit,
        .ndo_start_xmit      = ip6gre_tunnel_xmit,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_change_mtu      = ip6gre_tunnel_change_mtu,
        .ndo_get_stats64     = ip6gre_get_stats64,
};

static void ip6gre_tap_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->netdev_ops = &ip6gre_tap_netdev_ops;
        dev->destructor = ip6gre_dev_free;

        dev->iflink = 0;
        dev->features |= NETIF_F_NETNS_LOCAL;
}
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
        struct nlattr *tb[], struct nlattr *data[])
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

        nt = netdev_priv(dev);
        ip6gre_netlink_parms(data, &nt->parms);

        if (ip6gre_tunnel_find(net, &nt->parms, dev->type))

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);

        /* Can use a lockless transmit, unless we generate output sequences */
        if (!(nt->parms.o_flags & GRE_SEQ))
                dev->features |= NETIF_F_LLTX;

        err = register_netdevice(dev);

        ip6gre_tunnel_link(ign, nt);
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
                             struct nlattr *data[])
        struct ip6_tnl *t, *nt;
        struct net *net = dev_net(dev);
        struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
        struct __ip6_tnl_parm p;

        if (dev == ign->fb_tunnel_dev)

        nt = netdev_priv(dev);
        ip6gre_netlink_parms(data, &p);

        t = ip6gre_tunnel_locate(net, &p, 0);

        ip6gre_tunnel_unlink(ign, t);
        ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
        ip6gre_tunnel_link(ign, t);
        netdev_state_change(dev);
static size_t ip6gre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_ENCAP_LIMIT */
                nla_total_size(1) +
                /* IFLA_GRE_FLOWINFO */
                nla_total_size(4) +
                /* IFLA_GRE_FLAGS */
                nla_total_size(4) +
                0;
}
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct __ip6_tnl_parm *p = &t->parms;

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->laddr) ||
            nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->raddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
            /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
            nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
            nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags))
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK]        = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS]      = { .type = NLA_U16 },
        [IFLA_GRE_IKEY]        = { .type = NLA_U32 },
        [IFLA_GRE_OKEY]        = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL]       = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
        [IFLA_GRE_REMOTE]      = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
        [IFLA_GRE_TTL]         = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
        [IFLA_GRE_FLOWINFO]    = { .type = NLA_U32 },
        [IFLA_GRE_FLAGS]       = { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .kind       = "ip6gre",
        .maxtype    = IFLA_GRE_MAX,
        .policy     = ip6gre_policy,
        .priv_size  = sizeof(struct ip6_tnl),
        .setup      = ip6gre_tunnel_setup,
        .validate   = ip6gre_tunnel_validate,
        .newlink    = ip6gre_newlink,
        .changelink = ip6gre_changelink,
        .get_size   = ip6gre_get_size,
        .fill_info  = ip6gre_fill_info,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
        .kind       = "ip6gretap",
        .maxtype    = IFLA_GRE_MAX,
        .policy     = ip6gre_policy,
        .priv_size  = sizeof(struct ip6_tnl),
        .setup      = ip6gre_tap_setup,
        .validate   = ip6gre_tap_validate,
        .newlink    = ip6gre_newlink,
        .changelink = ip6gre_changelink,
        .get_size   = ip6gre_get_size,
        .fill_info  = ip6gre_fill_info,
};
/*
 * And now the module's code and kernel interface.
 */
static int __init ip6gre_init(void)
        pr_info("GRE over IPv6 tunneling driver\n");

        err = register_pernet_device(&ip6gre_net_ops);

        err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;

        err = rtnl_link_register(&ip6gre_link_ops);
                goto rtnl_link_failed;

        err = rtnl_link_register(&ip6gre_tap_ops);
                goto tap_ops_failed;

        rtnl_link_unregister(&ip6gre_link_ops);
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
        unregister_pernet_device(&ip6gre_net_ops);
static void __exit ip6gre_fini(void)
{
        rtnl_link_unregister(&ip6gre_tap_ops);
        rtnl_link_unregister(&ip6gre_link_ops);
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
        unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_NETDEV("ip6gre0");