/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
struct mr6_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is entirely free of exclusive locks.
 */
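/* Illustrative sketch (not part of the original file): under this scheme a
 * data-path reader only ever takes mrt_lock for reading, e.g.
 *
 *	read_lock(&mrt_lock);
 *	c = ip6mr_cache_find(mrt, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
 *	...
 *	read_unlock(&mrt_lock);
 *
 * while updates to the resolved hash table happen in process context under
 * write_lock_bh(&mrt_lock), and only the unresolved queue needs the stronger
 * mfc_unres_lock spinlock.
 */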
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};
static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/ip6_mr_cache and /proc/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};
static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
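/* Example /proc/net/ip6_mr_vif output produced by the format above
 * (illustrative values, not from a real system):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0          12480      96     24960     192 00000
 *	 1 pim6reg           0       0      1040       8 00001
 *
 * The last column is vif->flags printed in hex (the register vif carries
 * MIFF_REGISTER).
 */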
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mrt->mfc6_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
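/* Example /proc/net/ip6_mr_cache line for a resolved entry (illustrative
 * values; %pI6 prints the full uncompressed address):
 *
 *	ff0e:...:0001 2001:0db8:...:0001 0         96    12480        0  1:1  2:1
 *
 * Each "Oifs" item is mif:ttl-threshold. Unresolved entries print an Iif of
 * -1 (mf6c_parent is unset) and all-zero counters, as handled above.
 */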
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}
static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
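/* What pim6_rcv just validated and unwrapped (a sketch; the on-wire layout
 * follows the PIM-SM Register encapsulation of RFC 4601):
 *
 *	+------------------+------------------+--------------------------+
 *	| outer IPv6 (PIM) | pimreghdr (8 B)  | inner IPv6 mcast packet  |
 *	+------------------+------------------+--------------------------+
 *
 * The outer header and register header are stripped, and the inner packet is
 * re-injected on the pim6reg device via skb_tunnel_rx(), so it re-enters the
 * stack as if it had been received on that virtual interface.
 */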
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
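/* MTU arithmetic above: 1500 (Ethernet) - 40 (sizeof(struct ipv6hdr), the
 * outer Register header) - 8 (the PIM register header) = 1452 bytes left
 * for the encapsulated packet.
 */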
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev),
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
static void ipmr_expire_process(unsigned long arg)
{
	struct mr6_table *mrt = (struct mr6_table *)arg;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
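/* Worked example (illustrative): with maxvif == 4, all MIFs present and
 * ttls = {0, 1, 255, 3}, vifs 0 (ttl 0) and 2 (ttl 255) stay at 255,
 * vifs 1 and 3 keep their thresholds, and the result is minvif == 1 and
 * maxvif == 4 (one past the highest active vif).
 */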
static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev),
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}
/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}
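/* Lookup order used by the input path (see ip6_mr_input below): an exact
 * (S,G) match via ip6mr_cache_find() is tried first; on a miss it falls back
 * to a (*,G) entry here, which in turn falls back to the (*,*) proxy entry
 * via ip6mr_cache_find_any_parent().
 */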
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this
 *	but pim6sd expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
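/* For reference, the message prepended below is the UAPI struct from
 * <linux/mroute6.h> that pim6sd reads off its raw socket (field layout as in
 * the kernel headers):
 *
 *	struct mrt6msg {
 *		__u8		im6_mbz;	// must be zero
 *		__u8		im6_msgtype;	// NOCACHE, WRONGMIF, WHOLEPKT
 *		__u16		im6_mif;	// mif received on
 *		__u32		im6_pad;	// padding for 64-bit arch
 *		struct in6_addr	im6_src, im6_dst;
 *	};
 */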
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+ sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);
			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);
			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;
	LIST_HEAD(list);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, &list);
		}
	}
	unregister_netdevice_many(&list);

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};
/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}
static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, NULL);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(mrt, i, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
		inet6_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			inet6_netconf_notify_devconf(net,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);
			write_unlock_bh(&mrt_lock);

			mroute_clean_tables(mrt);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
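/* Illustrative userspace counterpart (a sketch, not from this file): a
 * daemon in the pim6sd mold drives these options roughly as
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on));
 *	struct mif6ctl mc = { .mif6c_mifi = 0,
 *			      .mif6c_pifi = if_nametoindex("eth0") };
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *
 * where "eth0" is a placeholder; the MRT6_* options are handled at the
 * IPPROTO_IPV6 level of the raw ICMPv6 socket.
 */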
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t	mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
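/* These mirror sioc_sg_req6/sioc_mif_req6 but with compat_ulong_t counters:
 * 32-bit userspace sees 4-byte unsigned longs while the native 64-bit structs
 * use 8-byte ones, so the layouts differ and must be translated here.
 */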
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on. Otherwise the program would have to join on
	 * all interfaces. On the other hand, a multihomed host (or a router,
	 * but not an mrouter) cannot join on more than one interface - that
	 * would result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif6_table[vif].dev != skb->dev) {
		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif6_table[vif].pkt_in++;
	mrt->vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&cache->mf6c_origin) &&
	    ipv6_addr_any(&cache->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mf6c_parent &&
		    ipv6_hdr(skb)->hop_limit >
				cache->mfc_un.res.ttls[cache->mf6c_parent]) {
			/* It's an (*,*) entry and the packet is not coming
			 * from the upstream: forward the packet to the
			 * upstream only.
			 */
			psend = cache->mf6c_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, cache, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, cache, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
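/* Note on the fan-out above: the loop clones the skb for every eligible oif
 * except the last one it finds; the original skb is consumed by the final
 * ip6mr_forward2() call, saving one clone per packet.
 */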
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct nlattr *mp_attr;
	struct rta_mfc_stats mfcs;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mf6c_parent >= MAXMIFS)
		return -ENOENT;

	if (MIF_EXISTS(mrt, c->mf6c_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
		return -EMSGSIZE;
	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mr6_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size(sizeof(struct rta_mfc_stats))
		;

	return len;
}
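/* Rough worked example (assuming common ABI sizes: rtmsg 12 B, nlattr header
 * 4 B, rtnexthop 8 B, rta_mfc_stats 24 B): a resolved entry with maxvif == 2
 * needs about 12 + 8 + 20 + 20 + 8 + 4 + 2*8 + 28 = 116 bytes, while an
 * unresolved entry stops after RTA_DST at 60 bytes.
 */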
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr6_table *mrt;
	struct mfc6_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	read_lock(&mrt_lock);
	ip6mr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC6_LINES; h++) {
			list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ip6mr_fill_mroute(mrt, skb,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      mfc, RTM_NEWROUTE,
						      NLM_F_MULTI) < 0)
					goto done;
next_entry:
				e++;
			}
			e = 0;
			s_e = 0;
		}
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ip6mr_fill_mroute(mrt, skb,
					      NETLINK_CB(cb->skb).portid,
					      cb->nlh->nlmsg_seq,
					      mfc, RTM_NEWROUTE,
					      NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	read_unlock(&mrt_lock);

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}