net: ipmr: factor out common vif init code
net/ipv4/ipmr.c
/*
 *      IP multicast routing support for mrouted 3.6/3.8
 *
 *              (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *        Linux Consultancy and Custom Driver Development
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Michael Chastain        :       Incorrect size of copying.
 *      Alan Cox                :       Added the cache manager code
 *      Alan Cox                :       Fixed the clone/copy bug and device race.
 *      Mike McLagan            :       Routing by source
 *      Malcolm Beattie         :       Buffer handling fixes.
 *      Alexey Kuznetsov        :       Double buffer free and other fixes.
 *      SVR Anand               :       Fixed several multicast bugs and problems.
 *      Alexey Kuznetsov        :       Status, optimisations and more.
 *      Brad Parker             :       Better behaviour on mrouted upcall
 *                                      overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *      Pavlin Ivanov Radoslavov:       PIMv2 Registers must checksum only PIM header
 *                                      Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>

struct mr_table {
        struct list_head        list;
        possible_net_t          net;
        u32                     id;
        struct sock __rcu       *mroute_sk;
        struct timer_list       ipmr_expire_timer;
        struct list_head        mfc_unres_queue;
        struct list_head        mfc_cache_array[MFC_LINES];
        struct vif_device       vif_table[MAXVIFS];
        int                     maxvif;
        atomic_t                cache_resolve_queue_len;
        bool                    mroute_do_assert;
        bool                    mroute_do_pim;
        int                     mroute_reg_vif_num;
};

struct ipmr_rule {
        struct fib_rule         common;
};

struct ipmr_result {
        struct mr_table         *mrt;
};

static inline bool pimsm_enabled(void)
{
        return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
}

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * by the weak lock mrt_lock. The queue of unresolved entries is
 * protected by the strong spinlock mfc_unres_lock.
 *
 * In this case the data path is free of exclusive locks entirely.
 */
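
/* Illustrative sketch (not part of the original file) of the locking
 * discipline described above, assuming a reader on the packet path and a
 * writer on the RTNL-protected control path:
 *
 *      // data path: resolved-cache lookups need only rcu_read_lock()
 *      rcu_read_lock();
 *      c = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 *      ...
 *      rcu_read_unlock();
 *
 *      // control path: vif-table updates take the write lock
 *      write_lock_bh(&mrt_lock);
 *      v->dev = dev;
 *      write_unlock_bh(&mrt_lock);
 */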

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct sk_buff *skb, struct mfc_cache *cache,
                          int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
                if (mrt->id == id)
                        return mrt;
        }
        return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        int err;
        struct ipmr_result res;
        struct fib_lookup_arg arg = {
                .result = &res,
                .flags = FIB_LOOKUP_NOREF,
        };

        err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                               flowi4_to_flowi(flp4), 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
        return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        mrt = ipmr_get_table(rule->fr_net, rule->table);
        if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
        FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
        frh->dst_len = 0;
        frh->src_len = 0;
        frh->tos     = 0;
        return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
        .family         = RTNL_FAMILY_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
        .owner          = THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (IS_ERR(mrt)) {
                err = PTR_ERR(mrt);
                goto err1;
        }

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
        if (err < 0)
                goto err2;

        net->ipv4.mr_rules_ops = ops;
        return 0;

err2:
        ipmr_free_table(mrt);
err1:
        fib_rules_unregister(ops);
        return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        rtnl_lock();
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
        rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
        struct mr_table *mrt;

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (IS_ERR(mrt))
                return PTR_ERR(mrt);
        net->ipv4.mrt = mrt;
        return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        rtnl_lock();
        ipmr_free_table(net->ipv4.mrt);
        net->ipv4.mrt = NULL;
        rtnl_unlock();
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;
        unsigned int i;

        /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
        if (id != RT_TABLE_DEFAULT && id >= 1000000000)
                return ERR_PTR(-EINVAL);

        mrt = ipmr_get_table(net, id);
        if (mrt)
                return mrt;

        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
        if (!mrt)
                return ERR_PTR(-ENOMEM);
        write_pnet(&mrt->net, net);
        mrt->id = id;

        /* Forwarding cache */
        for (i = 0; i < MFC_LINES; i++)
                INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

        INIT_LIST_HEAD(&mrt->mfc_unres_queue);

        setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
                    (unsigned long)mrt);

        mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
        return mrt;
}
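
/* Illustrative userspace sketch (not part of this file), assuming
 * CONFIG_IP_MROUTE_MULTIPLE_TABLES: a daemon reaches ipmr_new_table() by
 * binding its not-yet-initialized mroute socket to a non-default table:
 *
 *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *      u32 table = 42;
 *      setsockopt(s, IPPROTO_IP, MRT_TABLE, &table, sizeof(table));
 *      // subsequent MRT_INIT/MRT_ADD_VIF calls then act on table 42
 */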

static void ipmr_free_table(struct mr_table *mrt)
{
        del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt);
        kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
        struct net *net = dev_net(dev);

        dev_close(dev);

        dev = __dev_get_by_name(net, "tunl0");
        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
                        set_fs(oldfs);
                }
        }
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
        struct in_device *in_dev;

        ASSERT_RTNL();

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev)
                return false;
        ipv4_devconf_setall(in_dev);
        neigh_parms_data_state_setall(in_dev->arp_parms);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device  *dev;

        dev = __dev_get_by_name(net, "tunl0");

        if (dev) {
                const struct net_device_ops *ops = dev->netdev_ops;
                int err;
                struct ifreq ifr;
                struct ip_tunnel_parm p;

                memset(&p, 0, sizeof(p));
                p.iph.daddr = v->vifc_rmt_addr.s_addr;
                p.iph.saddr = v->vifc_lcl_addr.s_addr;
                p.iph.version = 4;
                p.iph.ihl = 5;
                p.iph.protocol = IPPROTO_IPIP;
                sprintf(p.name, "dvmrp%d", v->vifc_vifi);
                ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

                if (ops->ndo_do_ioctl) {
                        mm_segment_t oldfs = get_fs();

                        set_fs(KERNEL_DS);
                        err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
                        set_fs(oldfs);
                } else {
                        err = -EOPNOTSUPP;
                }
                dev = NULL;

                if (err == 0 &&
                    (dev = __dev_get_by_name(net, p.name)) != NULL) {
                        dev->flags |= IFF_MULTICAST;
                        if (!ipmr_init_vif_indev(dev))
                                goto failure;
                        if (dev_open(dev))
                                goto failure;
                        dev_hold(dev);
                }
        }
        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi4 fl4 = {
                .flowi4_oif     = dev->ifindex,
                .flowi4_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi4_mark    = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return err;
        }

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
        return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
        .ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

        if (!dev)
                return NULL;

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }

        if (!ipmr_init_vif_indev(dev))
                goto failure;
        if (dev_open(dev))
                goto failure;

        dev_hold(dev);

        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                     unsigned int pimlen)
{
        struct net_device *reg_dev = NULL;
        struct iphdr *encap;

        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
        /* Check that:
         * a. packet is really sent to a multicast group
         * b. packet is not a NULL-REGISTER
         * c. packet is not truncated
         */
        if (!ipv4_is_multicast(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + pimlen > skb->len)
                return 1;

        read_lock(&mrt_lock);
        if (mrt->mroute_reg_vif_num >= 0)
                reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
        read_unlock(&mrt_lock);

        if (!reg_dev)
                return 1;

        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = CHECKSUM_NONE;

        skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

        netif_rx(skb);

        return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        return NULL;
}
#endif

/**
 *      vif_delete - Delete a VIF entry
 *      @notify: Set to 1 if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }

        if (vifi == mrt->mroute_reg_vif_num)
                mrt->mroute_reg_vif_num = -1;

        if (vifi + 1 == mrt->maxvif) {
                int tmp;

                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                mrt->maxvif = tmp+1;
        }

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                inet_netconf_notify_devconf(dev_net(dev),
                                            NETCONFA_MC_FORWARDING,
                                            dev->ifindex, &in_dev->cnf);
                ip_rt_multicast_event(in_dev);
        }

        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        dev_put(dev);
        return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
        struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

        kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
        call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        struct nlmsgerr *e;

        atomic_dec(&mrt->cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e = nlmsg_data(nlh);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
        }

        ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
        struct mr_table *mrt = (struct mr_table *)arg;
        unsigned long now;
        unsigned long expires;
        struct mfc_cache *c, *next;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
                return;
        }

        if (list_empty(&mrt->mfc_unres_queue))
                goto out;

        now = jiffies;
        expires = 10*HZ;

        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)
                                expires = interval;
                        continue;
                }

                list_del(&c->list);
                mroute_netlink_event(mrt, c, RTM_DELROUTE);
                ipmr_destroy_unres(mrt, c);
        }

        if (!list_empty(&mrt->mfc_unres_queue))
                mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
        spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < mrt->maxvif; vifi++) {
                if (VIF_EXISTS(mrt, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
}
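
/* Worked example (illustrative, not part of the original source): with
 * existing vifs 0..3 and ttls = {1, 0, 255, 3}, only vifs 0 and 3 are
 * forwarding targets (both 0 and 255 mean "do not forward"), so the entry
 * ends up with minvif = 0, maxvif = 4, and
 * res.ttls = {1, 255, 255, 3, 255, ...}.
 */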

static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &mrt->vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;
        int err;

        /* Is vif busy? */
        if (VIF_EXISTS(mrt, vifi))
                return -EADDRINUSE;

        switch (vifc->vifc_flags) {
        case VIFF_REGISTER:
                if (!pimsm_enabled())
                        return -EINVAL;
                /* Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (mrt->mroute_reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ipmr_reg_vif(net, mrt);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        unregister_netdevice(dev);
                        dev_put(dev);
                        return err;
                }
                break;
        case VIFF_TUNNEL:
                dev = ipmr_new_tunnel(net, vifc);
                if (!dev)
                        return -ENOBUFS;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        ipmr_del_tunnel(dev, vifc);
                        dev_put(dev);
                        return err;
                }
                break;
        case VIFF_USE_IFINDEX:
        case 0:
                if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
                        dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
                        if (dev && !__in_dev_get_rtnl(dev)) {
                                dev_put(dev);
                                return -EADDRNOTAVAIL;
                        }
                } else {
                        dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
                }
                if (!dev)
                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);
                if (err) {
                        dev_put(dev);
                        return err;
                }
                break;
        default:
                return -EINVAL;
        }

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
        }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
                                    &in_dev->cnf);
        ip_rt_multicast_event(in_dev);

        /* Fill in the VIF structures */

        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;
        if (!mrtsock)
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
                v->link = dev_get_iflink(dev);

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
        if (v->flags & VIFF_REGISTER)
                mrt->mroute_reg_vif_num = vifi;
        if (vifi+1 > mrt->maxvif)
                mrt->maxvif = vifi+1;
        write_unlock_bh(&mrt_lock);
        return 0;
}
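
/* Illustrative userspace sketch (not part of this file): vif_add() is
 * reached through MRT_ADD_VIF on the mroute socket. Assuming the daemon
 * has already issued MRT_INIT, adding the device with ifindex 2 as vif 0
 * would look roughly like:
 *
 *      struct vifctl vc = {
 *              .vifc_vifi = 0,
 *              .vifc_flags = VIFF_USE_IFINDEX,
 *              .vifc_threshold = 1,
 *              .vifc_lcl_ifindex = 2,
 *      };
 *      setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 */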

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
        int line = MFC_HASH(mcastgrp, origin);
        struct mfc_cache *c;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
                        return c;
        }
        return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
                                                    int vifi)
{
        int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
        struct mfc_cache *c;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
                if (c->mfc_origin == htonl(INADDR_ANY) &&
                    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
                    c->mfc_un.res.ttls[vifi] < 255)
                        return c;

        return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
{
        int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
        struct mfc_cache *c, *proxy;

        if (mcastgrp == htonl(INADDR_ANY))
                goto skip;

        list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
                if (c->mfc_origin == htonl(INADDR_ANY) &&
                    c->mfc_mcastgrp == mcastgrp) {
                        if (c->mfc_un.res.ttls[vifi] < 255)
                                return c;

                        /* It's ok if the vifi is part of the static tree */
                        proxy = ipmr_cache_find_any_parent(mrt,
                                                           c->mfc_parent);
                        if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
                                return c;
                }

skip:
        return ipmr_cache_find_any_parent(mrt, vifi);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

        if (c)
                c->mfc_un.res.minvif = MAXVIFS;
        return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

        if (c) {
                skb_queue_head_init(&c->mfc_un.unres.unresolved);
                c->mfc_un.unres.expires = jiffies + 10*HZ;
        }
        return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
        struct sk_buff *skb;
        struct nlmsgerr *e;

        /* Play the pending entries through our router */
        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
                                nlh->nlmsg_len = skb_tail_pointer(skb) -
                                                 (u8 *)nlh;
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                e = nlmsg_data(nlh);
                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));
                        }

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        ip_mr_forward(net, mrt, skb, c, 0);
                }
        }
}

/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
        const int ihl = ip_hdrlen(pkt);
        struct sock *mroute_sk;
        struct igmphdr *igmp;
        struct igmpmsg *msg;
        struct sk_buff *skb;
        int ret;

        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
        else
                skb = alloc_skb(128, GFP_ATOMIC);

        if (!skb)
                return -ENOBUFS;

        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                 * Duplicate old header, fix ihl, length etc.
                 * And all this only to mangle msg->im_msgtype and
                 * to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;
                msg->im_mbz = 0;
                msg->im_vif = mrt->mroute_reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));
        } else {
                /* Copy the IP header */
                skb_set_network_header(skb, skb->len);
                skb_put(skb, ihl);
                skb_copy_to_linear_data(skb, pkt->data, ihl);
                /* Flag to the kernel this is a route add */
                ip_hdr(skb)->protocol = 0;
                msg = (struct igmpmsg *)skb_network_header(skb);
                msg->im_vif = vifi;
                skb_dst_set(skb, dst_clone(skb_dst(pkt)));
                /* Add our header */
                igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
                igmp->type = assert;
                msg->im_msgtype = assert;
                igmp->code = 0;
                ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
                skb->transport_header = skb->network_header;
        }

        rcu_read_lock();
        mroute_sk = rcu_dereference(mrt->mroute_sk);
        if (!mroute_sk) {
                rcu_read_unlock();
                kfree_skb(skb);
                return -EINVAL;
        }

        /* Deliver to mrouted */
        ret = sock_queue_rcv_skb(mroute_sk, skb);
        rcu_read_unlock();
        if (ret < 0) {
                net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
                kfree_skb(skb);
        }

        return ret;
}

/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
                                 struct sk_buff *skb)
{
        bool found = false;
        int err;
        struct mfc_cache *c;
        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                /* Create a new entry if allowable */
                if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
                    (c = ipmr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }

                /* Fill in the new cache entry */
                c->mfc_parent   = -1;
                c->mfc_origin   = iph->saddr;
                c->mfc_mcastgrp = iph->daddr;

                /* Reflect first query at mrouted. */
                err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                         * out - Brad Parker
                         */
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_cache_free(c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&mrt->cache_resolve_queue_len);
                list_add(&c->list, &mrt->mfc_unres_queue);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);

                if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
                        mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
        }

        /* See if we can append the packet */
        if (c->mfc_un.unres.unresolved.qlen > 3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
        int line;
        struct mfc_cache *c, *next;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
                    (parent == -1 || parent == c->mfc_parent)) {
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_cache_free(c);
                        return 0;
                }
        }
        return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
{
        bool found = false;
        int line;
        struct mfc_cache *uc, *c;

        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
                    (parent == -1 || parent == c->mfc_parent)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                mroute_netlink_event(mrt, c, RTM_NEWROUTE);
                return 0;
        }

        if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
            !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
                return -EINVAL;

        c = ipmr_cache_alloc();
        if (!c)
                return -ENOMEM;

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;

        list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

        /* Check to see if we resolved a queued list. If so we
         * need to send on the frames and tidy up.
         */
        found = false;
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
                        list_del(&uc->list);
                        atomic_dec(&mrt->cache_resolve_queue_len);
                        found = true;
                        break;
                }
        }
        if (list_empty(&mrt->mfc_unres_queue))
                del_timer(&mrt->ipmr_expire_timer);
        spin_unlock_bh(&mfc_unres_lock);

        if (found) {
                ipmr_cache_resolve(net, mrt, uc, c);
                ipmr_cache_free(uc);
        }
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);
        return 0;
}
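
/* Illustrative userspace sketch (not part of this file): ipmr_mfc_add() is
 * reached through MRT_ADD_MFC, typically in response to an IGMPMSG_NOCACHE
 * upcall. Assuming vifs 1 and 2 exist, installing an (S,G) entry with
 * parent vif 0 would look roughly like:
 *
 *      struct mfcctl mc = { 0 };
 *      inet_pton(AF_INET, "10.0.0.1", &mc.mfcc_origin);
 *      inet_pton(AF_INET, "239.1.1.1", &mc.mfcc_mcastgrp);
 *      mc.mfcc_parent = 0;
 *      mc.mfcc_ttls[1] = 1;    // forward out vif 1
 *      mc.mfcc_ttls[2] = 1;    // and out vif 2
 *      setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */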

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt)
{
        int i;
        LIST_HEAD(list);
        struct mfc_cache *c, *next;

        /* Shut down all active vif entries */
        for (i = 0; i < mrt->maxvif; i++) {
                if (!(mrt->vif_table[i].flags & VIFF_STATIC))
                        vif_delete(mrt, i, 0, &list);
        }
        unregister_netdevice_many(&list);

        /* Wipe the cache */
        for (i = 0; i < MFC_LINES; i++) {
                list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
                        if (c->mfc_flags & MFC_STATIC)
                                continue;
                        list_del_rcu(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_cache_free(c);
                }
        }

        if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_destroy_unres(mrt, c);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
}

/* Called from ip_ra_control(), before an RCU grace period;
 * we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        rtnl_lock();
        ipmr_for_each_table(mrt, net) {
                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
                        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                        RCU_INIT_POINTER(mrt->mroute_sk, NULL);
                        mroute_clean_tables(mrt);
                }
        }
        rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                         unsigned int optlen)
{
        struct net *net = sock_net(sk);
        int val, ret = 0, parent = 0;
        struct mr_table *mrt;
        struct vifctl vif;
        struct mfcctl mfc;
        u32 uval;

        /* There's one exception to the lock - MRT_DONE which needs to unlock */
        rtnl_lock();
        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP) {
                ret = -EOPNOTSUPP;
                goto out_unlock;
        }

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt) {
                ret = -ENOENT;
                goto out_unlock;
        }
        if (optname != MRT_INIT) {
                if (sk != rcu_access_pointer(mrt->mroute_sk) &&
                    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
                        ret = -EACCES;
                        goto out_unlock;
                }
        }

        switch (optname) {
        case MRT_INIT:
                if (optlen != sizeof(int))
                        ret = -EINVAL;
                if (rtnl_dereference(mrt->mroute_sk))
                        ret = -EADDRINUSE;
                if (ret)
                        break;

                ret = ip_ra_control(sk, 1, mrtsock_destruct);
                if (ret == 0) {
                        rcu_assign_pointer(mrt->mroute_sk, sk);
                        IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
                        inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                                    NETCONFA_IFINDEX_ALL,
                                                    net->ipv4.devconf_all);
                }
                break;
        case MRT_DONE:
                if (sk != rcu_access_pointer(mrt->mroute_sk)) {
                        ret = -EACCES;
                } else {
                        /* We need to unlock here because mrtsock_destruct takes
                         * care of rtnl itself and we can't change that due to
                         * the IP_ROUTER_ALERT setsockopt which runs without it.
                         */
                        rtnl_unlock();
                        ret = ip_ra_control(sk, 0, NULL);
                        goto out;
                }
                break;
        case MRT_ADD_VIF:
        case MRT_DEL_VIF:
                if (optlen != sizeof(vif)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&vif, optval, sizeof(vif))) {
                        ret = -EFAULT;
                        break;
                }
                if (vif.vifc_vifi >= MAXVIFS) {
                        ret = -ENFILE;
                        break;
                }
                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(net, mrt, &vif,
                                      sk == rtnl_dereference(mrt->mroute_sk));
                } else {
                        ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
                }
                break;
        /* Manipulate the forwarding caches. These live
         * in a sort of kernel/user symbiosis.
         */
        case MRT_ADD_MFC:
        case MRT_DEL_MFC:
                parent = -1;
                /* fall through */
        case MRT_ADD_MFC_PROXY:
        case MRT_DEL_MFC_PROXY:
                if (optlen != sizeof(mfc)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&mfc, optval, sizeof(mfc))) {
                        ret = -EFAULT;
                        break;
                }
                if (parent == 0)
                        parent = mfc.mfcc_parent;
                if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
                        ret = ipmr_mfc_delete(mrt, &mfc, parent);
                else
                        ret = ipmr_mfc_add(net, mrt, &mfc,
                                           sk == rtnl_dereference(mrt->mroute_sk),
                                           parent);
                break;
        /* Control PIM assert. */
        case MRT_ASSERT:
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }
                mrt->mroute_do_assert = val;
                break;
        case MRT_PIM:
                if (!pimsm_enabled()) {
                        ret = -ENOPROTOOPT;
                        break;
                }
                if (optlen != sizeof(val)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(val, (int __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }

                val = !!val;
                if (val != mrt->mroute_do_pim) {
                        mrt->mroute_do_pim = val;
                        mrt->mroute_do_assert = val;
                }
                break;
        case MRT_TABLE:
                if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
                        ret = -ENOPROTOOPT;
                        break;
                }
                if (optlen != sizeof(uval)) {
                        ret = -EINVAL;
                        break;
                }
                if (get_user(uval, (u32 __user *)optval)) {
                        ret = -EFAULT;
                        break;
                }

                if (sk == rtnl_dereference(mrt->mroute_sk)) {
                        ret = -EBUSY;
                } else {
                        mrt = ipmr_new_table(net, uval);
                        if (IS_ERR(mrt))
                                ret = PTR_ERR(mrt);
                        else
                                raw_sk(sk)->ipmr_table = uval;
                }
                break;
        /* Spurious command, or MRT_VERSION which you cannot set. */
        default:
                ret = -ENOPROTOOPT;
        }
out_unlock:
        rtnl_unlock();
out:
        return ret;
}
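
/* Illustrative lifecycle sketch (not part of this file) of a routing daemon
 * driving this setsockopt interface; opening the raw IGMP socket itself
 * already requires privilege (CAP_NET_RAW):
 *
 *      int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *      int one = 1;
 *      setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *      // ... MRT_ADD_VIF / MRT_ADD_MFC as sketched above ...
 *      setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);   // tear down
 */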

/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
        int olr;
        int val;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_IGMP)
                return -EOPNOTSUPP;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (optname) {
        case MRT_VERSION:
                val = 0x0305;
                break;
        case MRT_PIM:
                if (!pimsm_enabled())
                        return -ENOPROTOOPT;
                val = mrt->mroute_do_pim;
                break;
        case MRT_ASSERT:
                val = mrt->mroute_do_assert;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (get_user(olr, optlen))
                return -EFAULT;
        if (olr < 0)
                return -EINVAL;
        olr = min_t(unsigned int, olr, sizeof(int));
        if (put_user(olr, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;

        mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
        if (!mrt)
                return -ENOENT;

        switch (cmd) {
        case SIOCGETVIFCNT:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                rcu_read_lock();
                c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        rcu_read_unlock();

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                rcu_read_unlock();
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}
1552
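/* Example (userspace sketch, not part of this file): per-VIF counters
 * can be read through the ioctl interface above on the same raw IGMP
 * socket:
 *
 *	struct sioc_vif_req vr = { .vifi = 0 };
 *	if (ioctl(fd, SIOCGETVIFCNT, &vr) == 0)
 *		printf("%lu pkts in, %lu pkts out\n", vr.icount, vr.ocount);
 */
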
1553 #ifdef CONFIG_COMPAT
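/* 32-bit userspace on a 64-bit kernel passes these request structures
 * with compat_ulong_t (32-bit) counters, so SIOCGETVIFCNT and
 * SIOCGETSGCNT need compat layouts and a dedicated handler.
 */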
1554 struct compat_sioc_sg_req {
1555         struct in_addr src;
1556         struct in_addr grp;
1557         compat_ulong_t pktcnt;
1558         compat_ulong_t bytecnt;
1559         compat_ulong_t wrong_if;
1560 };
1561
1562 struct compat_sioc_vif_req {
1563         vifi_t  vifi;           /* Which iface */
1564         compat_ulong_t icount;
1565         compat_ulong_t ocount;
1566         compat_ulong_t ibytes;
1567         compat_ulong_t obytes;
1568 };
1569
1570 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1571 {
1572         struct compat_sioc_sg_req sr;
1573         struct compat_sioc_vif_req vr;
1574         struct vif_device *vif;
1575         struct mfc_cache *c;
1576         struct net *net = sock_net(sk);
1577         struct mr_table *mrt;
1578
1579         mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1580         if (!mrt)
1581                 return -ENOENT;
1582
1583         switch (cmd) {
1584         case SIOCGETVIFCNT:
1585                 if (copy_from_user(&vr, arg, sizeof(vr)))
1586                         return -EFAULT;
1587                 if (vr.vifi >= mrt->maxvif)
1588                         return -EINVAL;
1589                 read_lock(&mrt_lock);
1590                 vif = &mrt->vif_table[vr.vifi];
1591                 if (VIF_EXISTS(mrt, vr.vifi)) {
1592                         vr.icount = vif->pkt_in;
1593                         vr.ocount = vif->pkt_out;
1594                         vr.ibytes = vif->bytes_in;
1595                         vr.obytes = vif->bytes_out;
1596                         read_unlock(&mrt_lock);
1597
1598                         if (copy_to_user(arg, &vr, sizeof(vr)))
1599                                 return -EFAULT;
1600                         return 0;
1601                 }
1602                 read_unlock(&mrt_lock);
1603                 return -EADDRNOTAVAIL;
1604         case SIOCGETSGCNT:
1605                 if (copy_from_user(&sr, arg, sizeof(sr)))
1606                         return -EFAULT;
1607
1608                 rcu_read_lock();
1609                 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1610                 if (c) {
1611                         sr.pktcnt = c->mfc_un.res.pkt;
1612                         sr.bytecnt = c->mfc_un.res.bytes;
1613                         sr.wrong_if = c->mfc_un.res.wrong_if;
1614                         rcu_read_unlock();
1615
1616                         if (copy_to_user(arg, &sr, sizeof(sr)))
1617                                 return -EFAULT;
1618                         return 0;
1619                 }
1620                 rcu_read_unlock();
1621                 return -EADDRNOTAVAIL;
1622         default:
1623                 return -ENOIOCTLCMD;
1624         }
1625 }
1626 #endif
1627
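/* Netdevice notifier: on NETDEV_UNREGISTER, delete every VIF in every
 * table that still references the vanishing device.
 */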
1628 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1629 {
1630         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1631         struct net *net = dev_net(dev);
1632         struct mr_table *mrt;
1633         struct vif_device *v;
1634         int ct;
1635
1636         if (event != NETDEV_UNREGISTER)
1637                 return NOTIFY_DONE;
1638
1639         ipmr_for_each_table(mrt, net) {
1640                 v = &mrt->vif_table[0];
1641                 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1642                         if (v->dev == dev)
1643                                 vif_delete(mrt, ct, 1, NULL);
1644                 }
1645         }
1646         return NOTIFY_DONE;
1647 }
1648
1649 static struct notifier_block ip_mr_notifier = {
1650         .notifier_call = ipmr_device_event,
1651 };
1652
1653 /* Encapsulate a packet by attaching a valid IPIP header to it.
1654  * This avoids tunnel drivers and other mess, and gives us the speed that
1655  * is so important for multicast video.
1656  */
1657 static void ip_encap(struct net *net, struct sk_buff *skb,
1658                      __be32 saddr, __be32 daddr)
1659 {
1660         struct iphdr *iph;
1661         const struct iphdr *old_iph = ip_hdr(skb);
1662
1663         skb_push(skb, sizeof(struct iphdr));
1664         skb->transport_header = skb->network_header;
1665         skb_reset_network_header(skb);
1666         iph = ip_hdr(skb);
1667
1668         iph->version    =       4;
1669         iph->tos        =       old_iph->tos;
1670         iph->ttl        =       old_iph->ttl;
1671         iph->frag_off   =       0;
1672         iph->daddr      =       daddr;
1673         iph->saddr      =       saddr;
1674         iph->protocol   =       IPPROTO_IPIP;
1675         iph->ihl        =       5;
1676         iph->tot_len    =       htons(skb->len);
1677         ip_select_ident(net, skb, NULL);
1678         ip_send_check(iph);
1679
1680         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1681         nf_reset(skb);
1682 }
1683
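/* Update the forwarding statistics, handle IP options and hand the
 * packet to dst_output(); used as the okfn of the NF_INET_FORWARD
 * hook in ipmr_queue_xmit() below.
 */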
1684 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1685                                       struct sk_buff *skb)
1686 {
1687         struct ip_options *opt = &(IPCB(skb)->opt);
1688
1689         IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
1690         IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
1691
1692         if (unlikely(opt->optlen))
1693                 ip_forward_options(skb);
1694
1695         return dst_output(net, sk, skb);
1696 }
1697
1698 /* Processing handlers for ipmr_forward */
1699
1700 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1701                             struct sk_buff *skb, struct mfc_cache *c, int vifi)
1702 {
1703         const struct iphdr *iph = ip_hdr(skb);
1704         struct vif_device *vif = &mrt->vif_table[vifi];
1705         struct net_device *dev;
1706         struct rtable *rt;
1707         struct flowi4 fl4;
1708         int    encap = 0;
1709
1710         if (!vif->dev)
1711                 goto out_free;
1712
1713         if (vif->flags & VIFF_REGISTER) {
1714                 vif->pkt_out++;
1715                 vif->bytes_out += skb->len;
1716                 vif->dev->stats.tx_bytes += skb->len;
1717                 vif->dev->stats.tx_packets++;
1718                 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1719                 goto out_free;
1720         }
1721
1722         if (vif->flags & VIFF_TUNNEL) {
1723                 rt = ip_route_output_ports(net, &fl4, NULL,
1724                                            vif->remote, vif->local,
1725                                            0, 0,
1726                                            IPPROTO_IPIP,
1727                                            RT_TOS(iph->tos), vif->link);
1728                 if (IS_ERR(rt))
1729                         goto out_free;
1730                 encap = sizeof(struct iphdr);
1731         } else {
1732                 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1733                                            0, 0,
1734                                            IPPROTO_IPIP,
1735                                            RT_TOS(iph->tos), vif->link);
1736                 if (IS_ERR(rt))
1737                         goto out_free;
1738         }
1739
1740         dev = rt->dst.dev;
1741
1742         if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1743                 /* Do not fragment multicasts. Alas, IPv4 does not
1744                  * allow us to send ICMP here, so the packets will
1745                  * disappear into a black hole.
1746                  */
1747                 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1748                 ip_rt_put(rt);
1749                 goto out_free;
1750         }
1751
1752         encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1753
1754         if (skb_cow(skb, encap)) {
1755                 ip_rt_put(rt);
1756                 goto out_free;
1757         }
1758
1759         vif->pkt_out++;
1760         vif->bytes_out += skb->len;
1761
1762         skb_dst_drop(skb);
1763         skb_dst_set(skb, &rt->dst);
1764         ip_decrease_ttl(ip_hdr(skb));
1765
1766         /* FIXME: forward and output firewalls used to be called here.
1767          * What do we do with netfilter? -- RR
1768          */
1769         if (vif->flags & VIFF_TUNNEL) {
1770                 ip_encap(net, skb, vif->local, vif->remote);
1771                 /* FIXME: extra output firewall step used to be here. --RR */
1772                 vif->dev->stats.tx_packets++;
1773                 vif->dev->stats.tx_bytes += skb->len;
1774         }
1775
1776         IPCB(skb)->flags |= IPSKB_FORWARDED;
1777
1778         /* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
1779          * locally not only before forwarding, but also after forwarding on
1780          * all output interfaces: if the mrouter runs a multicast
1781          * application, that application should receive packets regardless
1782          * of which interface it is joined on. Otherwise the application
1783          * would have to join on all interfaces. On the other hand, a
1784          * multihomed host (or a router that is not an mrouter) cannot join
1785          * on more than one interface - it would result in receiving
1786          * duplicate packets.
1787          */
1788         NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1789                 net, NULL, skb, skb->dev, dev,
1790                 ipmr_forward_finish);
1791         return;
1792
1793 out_free:
1794         kfree_skb(skb);
1795 }
1796
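/* Return the VIF index that @dev is bound to, or -1 if it has none. */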
1797 static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1798 {
1799         int ct;
1800
1801         for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
1802                 if (mrt->vif_table[ct].dev == dev)
1803                         break;
1804         }
1805         return ct;
1806 }
1807
1808 /* "local" means that we should preserve one skb (for local delivery) */
1809 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1810                           struct sk_buff *skb, struct mfc_cache *cache,
1811                           int local)
1812 {
1813         int psend = -1;
1814         int vif, ct;
1815         int true_vifi = ipmr_find_vif(mrt, skb->dev);
1816
1817         vif = cache->mfc_parent;
1818         cache->mfc_un.res.pkt++;
1819         cache->mfc_un.res.bytes += skb->len;
1820
1821         if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
1822                 struct mfc_cache *cache_proxy;
1823
1824                 /* For an (*,G) entry, we only check that the incoming
1825                  * interface is part of the static tree.
1826                  */
1827                 cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
1828                 if (cache_proxy &&
1829                     cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
1830                         goto forward;
1831         }
1832
1833         /* Wrong interface: drop packet and (maybe) send PIM assert. */
1834         if (mrt->vif_table[vif].dev != skb->dev) {
1835                 if (rt_is_output_route(skb_rtable(skb))) {
1836                         /* It is our own packet, looped back.
1837                          * Very complicated situation...
1838                          *
1839                          * The best workaround until the routing daemons are
1840                          * fixed is not to redistribute a packet if it was
1841                          * sent through the wrong interface. This means that
1842                          * multicast applications WILL NOT work for (S,G)
1843                          * entries whose default multicast route points to
1844                          * the wrong oif. In any case, it is not a good idea
1845                          * to run multicast applications on a router.
1846                          */
1847                         goto dont_forward;
1848                 }
1849
1850                 cache->mfc_un.res.wrong_if++;
1851
1852                 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1853                     /* PIM-SM uses asserts when switching from RPT to SPT,
1854                      * so we cannot check that the packet arrived on an oif.
1855                      * That is bad, but otherwise we would need to move a
1856                      * pretty large chunk of pimd into the kernel. Ough... --ANK
1857                      */
1858                     (mrt->mroute_do_pim ||
1859                      cache->mfc_un.res.ttls[true_vifi] < 255) &&
1860                     time_after(jiffies,
1861                                cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1862                         cache->mfc_un.res.last_assert = jiffies;
1863                         ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1864                 }
1865                 goto dont_forward;
1866         }
1867
1868 forward:
1869         mrt->vif_table[vif].pkt_in++;
1870         mrt->vif_table[vif].bytes_in += skb->len;
1871
1872         /* Forward the frame */
1873         if (cache->mfc_origin == htonl(INADDR_ANY) &&
1874             cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
1875                 if (true_vifi >= 0 &&
1876                     true_vifi != cache->mfc_parent &&
1877                     ip_hdr(skb)->ttl >
1878                                 cache->mfc_un.res.ttls[cache->mfc_parent]) {
1879                         /* It's an (*,*) entry and the packet is not coming from
1880                          * the upstream: forward the packet to the upstream
1881                          * only.
1882                          */
1883                         psend = cache->mfc_parent;
1884                         goto last_forward;
1885                 }
1886                 goto dont_forward;
1887         }
1888         for (ct = cache->mfc_un.res.maxvif - 1;
1889              ct >= cache->mfc_un.res.minvif; ct--) {
1890                 /* For (*,G) entry, don't forward to the incoming interface */
1891                 if ((cache->mfc_origin != htonl(INADDR_ANY) ||
1892                      ct != true_vifi) &&
1893                     ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1894                         if (psend != -1) {
1895                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1896
1897                                 if (skb2)
1898                                         ipmr_queue_xmit(net, mrt, skb2, cache,
1899                                                         psend);
1900                         }
1901                         psend = ct;
1902                 }
1903         }
1904 last_forward:
1905         if (psend != -1) {
1906                 if (local) {
1907                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1908
1909                         if (skb2)
1910                                 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1911                 } else {
1912                         ipmr_queue_xmit(net, mrt, skb, cache, psend);
1913                         return;
1914                 }
1915         }
1916
1917 dont_forward:
1918         if (!local)
1919                 kfree_skb(skb);
1920 }
1921
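/* Derive a flow key from the skb's IP header and route, and resolve the
 * matching multicast routing table via ipmr_fib_lookup().
 */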
1922 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1923 {
1924         struct rtable *rt = skb_rtable(skb);
1925         struct iphdr *iph = ip_hdr(skb);
1926         struct flowi4 fl4 = {
1927                 .daddr = iph->daddr,
1928                 .saddr = iph->saddr,
1929                 .flowi4_tos = RT_TOS(iph->tos),
1930                 .flowi4_oif = (rt_is_output_route(rt) ?
1931                                skb->dev->ifindex : 0),
1932                 .flowi4_iif = (rt_is_output_route(rt) ?
1933                                LOOPBACK_IFINDEX :
1934                                skb->dev->ifindex),
1935                 .flowi4_mark = skb->mark,
1936         };
1937         struct mr_table *mrt;
1938         int err;
1939
1940         err = ipmr_fib_lookup(net, &fl4, &mrt);
1941         if (err)
1942                 return ERR_PTR(err);
1943         return mrt;
1944 }
1945
1946 /* Multicast packets for forwarding arrive here.
1947  * Called with rcu_read_lock() held.
1948  */
1949 int ip_mr_input(struct sk_buff *skb)
1950 {
1951         struct mfc_cache *cache;
1952         struct net *net = dev_net(skb->dev);
1953         int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1954         struct mr_table *mrt;
1955
1956         /* The packet is looped back after forwarding; it must not be
1957          * forwarded a second time, but it can still be delivered locally.
1958          */
1959         if (IPCB(skb)->flags & IPSKB_FORWARDED)
1960                 goto dont_forward;
1961
1962         mrt = ipmr_rt_fib_lookup(net, skb);
1963         if (IS_ERR(mrt)) {
1964                 kfree_skb(skb);
1965                 return PTR_ERR(mrt);
1966         }
1967         if (!local) {
1968                 if (IPCB(skb)->opt.router_alert) {
1969                         if (ip_call_ra_chain(skb))
1970                                 return 0;
1971                 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
1972                         /* IGMPv1 (and broken IGMPv2 implementations such
1973                          * as Cisco IOS <= 11.2(8)) do not put the router
1974                          * alert option in IGMP packets destined to routable
1975                          * groups. It is very bad, because it means that we
1976                          * can forward NO IGMP messages.
1977                          */
1978                         struct sock *mroute_sk;
1979
1980                         mroute_sk = rcu_dereference(mrt->mroute_sk);
1981                         if (mroute_sk) {
1982                                 nf_reset(skb);
1983                                 raw_rcv(mroute_sk, skb);
1984                                 return 0;
1985                         }
1986                 }
1987         }
1988
1989         /* already under rcu_read_lock() */
1990         cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1991         if (!cache) {
1992                 int vif = ipmr_find_vif(mrt, skb->dev);
1993
1994                 if (vif >= 0)
1995                         cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
1996                                                     vif);
1997         }
1998
1999         /* No usable cache entry */
2000         if (!cache) {
2001                 int vif;
2002
2003                 if (local) {
2004                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2005                         ip_local_deliver(skb);
2006                         if (!skb2)
2007                                 return -ENOBUFS;
2008                         skb = skb2;
2009                 }
2010
2011                 read_lock(&mrt_lock);
2012                 vif = ipmr_find_vif(mrt, skb->dev);
2013                 if (vif >= 0) {
2014                         int err2 = ipmr_cache_unresolved(mrt, vif, skb);
2015                         read_unlock(&mrt_lock);
2016
2017                         return err2;
2018                 }
2019                 read_unlock(&mrt_lock);
2020                 kfree_skb(skb);
2021                 return -ENODEV;
2022         }
2023
2024         read_lock(&mrt_lock);
2025         ip_mr_forward(net, mrt, skb, cache, local);
2026         read_unlock(&mrt_lock);
2027
2028         if (local)
2029                 return ip_local_deliver(skb);
2030
2031         return 0;
2032
2033 dont_forward:
2034         if (local)
2035                 return ip_local_deliver(skb);
2036         kfree_skb(skb);
2037         return 0;
2038 }
2039
2040 #ifdef CONFIG_IP_PIMSM_V1
2041 /* Handle PIMv1 messages, which arrive encapsulated in IGMP. */
2042 int pim_rcv_v1(struct sk_buff *skb)
2043 {
2044         struct igmphdr *pim;
2045         struct net *net = dev_net(skb->dev);
2046         struct mr_table *mrt;
2047
2048         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2049                 goto drop;
2050
2051         pim = igmp_hdr(skb);
2052
2053         mrt = ipmr_rt_fib_lookup(net, skb);
2054         if (IS_ERR(mrt))
2055                 goto drop;
2056         if (!mrt->mroute_do_pim ||
2057             pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2058                 goto drop;
2059
2060         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2061 drop:
2062                 kfree_skb(skb);
2063         }
2064         return 0;
2065 }
2066 #endif
2067
2068 #ifdef CONFIG_IP_PIMSM_V2
2069 static int pim_rcv(struct sk_buff *skb)
2070 {
2071         struct pimreghdr *pim;
2072         struct net *net = dev_net(skb->dev);
2073         struct mr_table *mrt;
2074
2075         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2076                 goto drop;
2077
2078         pim = (struct pimreghdr *)skb_transport_header(skb);
2079         if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
2080             (pim->flags & PIM_NULL_REGISTER) ||
2081             (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2082              csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2083                 goto drop;
2084
2085         mrt = ipmr_rt_fib_lookup(net, skb);
2086         if (IS_ERR(mrt))
2087                 goto drop;
2088         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2089 drop:
2090                 kfree_skb(skb);
2091         }
2092         return 0;
2093 }
2094 #endif
2095
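/* Fill the RTA_IIF, RTA_MULTIPATH and RTA_MFC_STATS attributes for one
 * cache entry; returns -ENOENT while the entry is still unresolved.
 */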
2096 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2097                               struct mfc_cache *c, struct rtmsg *rtm)
2098 {
2099         int ct;
2100         struct rtnexthop *nhp;
2101         struct nlattr *mp_attr;
2102         struct rta_mfc_stats mfcs;
2103
2104         /* If cache is unresolved, don't try to parse IIF and OIF */
2105         if (c->mfc_parent >= MAXVIFS)
2106                 return -ENOENT;
2107
2108         if (VIF_EXISTS(mrt, c->mfc_parent) &&
2109             nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2110                 return -EMSGSIZE;
2111
2112         if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2113                 return -EMSGSIZE;
2114
2115         for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2116                 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2117                         if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2118                                 nla_nest_cancel(skb, mp_attr);
2119                                 return -EMSGSIZE;
2120                         }
2121
2122                         nhp->rtnh_flags = 0;
2123                         nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2124                         nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2125                         nhp->rtnh_len = sizeof(*nhp);
2126                 }
2127         }
2128
2129         nla_nest_end(skb, mp_attr);
2130
2131         mfcs.mfcs_packets = c->mfc_un.res.pkt;
2132         mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2133         mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2134         if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2135                 return -EMSGSIZE;
2136
2137         rtm->rtm_type = RTN_MULTICAST;
2138         return 1;
2139 }
2140
2141 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2142                    __be32 saddr, __be32 daddr,
2143                    struct rtmsg *rtm, int nowait)
2144 {
2145         struct mfc_cache *cache;
2146         struct mr_table *mrt;
2147         int err;
2148
2149         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2150         if (!mrt)
2151                 return -ENOENT;
2152
2153         rcu_read_lock();
2154         cache = ipmr_cache_find(mrt, saddr, daddr);
2155         if (!cache && skb->dev) {
2156                 int vif = ipmr_find_vif(mrt, skb->dev);
2157
2158                 if (vif >= 0)
2159                         cache = ipmr_cache_find_any(mrt, daddr, vif);
2160         }
2161         if (!cache) {
2162                 struct sk_buff *skb2;
2163                 struct iphdr *iph;
2164                 struct net_device *dev;
2165                 int vif = -1;
2166
2167                 if (nowait) {
2168                         rcu_read_unlock();
2169                         return -EAGAIN;
2170                 }
2171
2172                 dev = skb->dev;
2173                 read_lock(&mrt_lock);
2174                 if (dev)
2175                         vif = ipmr_find_vif(mrt, dev);
2176                 if (vif < 0) {
2177                         read_unlock(&mrt_lock);
2178                         rcu_read_unlock();
2179                         return -ENODEV;
2180                 }
2181                 skb2 = skb_clone(skb, GFP_ATOMIC);
2182                 if (!skb2) {
2183                         read_unlock(&mrt_lock);
2184                         rcu_read_unlock();
2185                         return -ENOMEM;
2186                 }
2187
2188                 skb_push(skb2, sizeof(struct iphdr));
2189                 skb_reset_network_header(skb2);
2190                 iph = ip_hdr(skb2);
2191                 iph->ihl = sizeof(struct iphdr) >> 2;
2192                 iph->saddr = saddr;
2193                 iph->daddr = daddr;
2194                 iph->version = 0;
2195                 err = ipmr_cache_unresolved(mrt, vif, skb2);
2196                 read_unlock(&mrt_lock);
2197                 rcu_read_unlock();
2198                 return err;
2199         }
2200
2201         read_lock(&mrt_lock);
2202         if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
2203                 cache->mfc_flags |= MFC_NOTIFY;
2204         err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
2205         read_unlock(&mrt_lock);
2206         rcu_read_unlock();
2207         return err;
2208 }
2209
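/* Build one complete rtnetlink route message (nlmsghdr + rtmsg +
 * attributes) for a multicast cache entry.
 */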
2210 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2211                             u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2212                             int flags)
2213 {
2214         struct nlmsghdr *nlh;
2215         struct rtmsg *rtm;
2216         int err;
2217
2218         nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2219         if (!nlh)
2220                 return -EMSGSIZE;
2221
2222         rtm = nlmsg_data(nlh);
2223         rtm->rtm_family   = RTNL_FAMILY_IPMR;
2224         rtm->rtm_dst_len  = 32;
2225         rtm->rtm_src_len  = 32;
2226         rtm->rtm_tos      = 0;
2227         rtm->rtm_table    = mrt->id;
2228         if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2229                 goto nla_put_failure;
2230         rtm->rtm_type     = RTN_MULTICAST;
2231         rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2232         if (c->mfc_flags & MFC_STATIC)
2233                 rtm->rtm_protocol = RTPROT_STATIC;
2234         else
2235                 rtm->rtm_protocol = RTPROT_MROUTED;
2236         rtm->rtm_flags    = 0;
2237
2238         if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2239             nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2240                 goto nla_put_failure;
2241         err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2242         /* do not break the dump if cache is unresolved */
2243         if (err < 0 && err != -ENOENT)
2244                 goto nla_put_failure;
2245
2246         nlmsg_end(skb, nlh);
2247         return 0;
2248
2249 nla_put_failure:
2250         nlmsg_cancel(skb, nlh);
2251         return -EMSGSIZE;
2252 }
2253
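/* Worst-case netlink message size for one cache entry; unresolved
 * entries carry no IIF, nexthop or stats attributes.
 */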
2254 static size_t mroute_msgsize(bool unresolved, int maxvif)
2255 {
2256         size_t len =
2257                 NLMSG_ALIGN(sizeof(struct rtmsg))
2258                 + nla_total_size(4)     /* RTA_TABLE */
2259                 + nla_total_size(4)     /* RTA_SRC */
2260                 + nla_total_size(4)     /* RTA_DST */
2261                 ;
2262
2263         if (!unresolved)
2264                 len = len
2265                       + nla_total_size(4)       /* RTA_IIF */
2266                       + nla_total_size(0)       /* RTA_MULTIPATH */
2267                       + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2268                                                 /* RTA_MFC_STATS */
2269                       + nla_total_size(sizeof(struct rta_mfc_stats))
2270                 ;
2271
2272         return len;
2273 }
2274
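/* Broadcast an RTNLGRP_IPV4_MROUTE notification whenever a cache entry
 * is added or deleted.
 */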
2275 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2276                                  int cmd)
2277 {
2278         struct net *net = read_pnet(&mrt->net);
2279         struct sk_buff *skb;
2280         int err = -ENOBUFS;
2281
2282         skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2283                         GFP_ATOMIC);
2284         if (!skb)
2285                 goto errout;
2286
2287         err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2288         if (err < 0)
2289                 goto errout;
2290
2291         rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2292         return;
2293
2294 errout:
2295         kfree_skb(skb);
2296         if (err < 0)
2297                 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2298 }
2299
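/* Dump all multicast routes, resolved and unresolved, for RTM_GETROUTE;
 * cb->args[] carries the table/bucket/entry position between calls.
 */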
2300 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2301 {
2302         struct net *net = sock_net(skb->sk);
2303         struct mr_table *mrt;
2304         struct mfc_cache *mfc;
2305         unsigned int t = 0, s_t;
2306         unsigned int h = 0, s_h;
2307         unsigned int e = 0, s_e;
2308
2309         s_t = cb->args[0];
2310         s_h = cb->args[1];
2311         s_e = cb->args[2];
2312
2313         rcu_read_lock();
2314         ipmr_for_each_table(mrt, net) {
2315                 if (t < s_t)
2316                         goto next_table;
2317                 if (t > s_t)
2318                         s_h = 0;
2319                 for (h = s_h; h < MFC_LINES; h++) {
2320                         list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
2321                                 if (e < s_e)
2322                                         goto next_entry;
2323                                 if (ipmr_fill_mroute(mrt, skb,
2324                                                      NETLINK_CB(cb->skb).portid,
2325                                                      cb->nlh->nlmsg_seq,
2326                                                      mfc, RTM_NEWROUTE,
2327                                                      NLM_F_MULTI) < 0)
2328                                         goto done;
2329 next_entry:
2330                                 e++;
2331                         }
2332                         e = s_e = 0;
2333                 }
2334                 spin_lock_bh(&mfc_unres_lock);
2335                 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2336                         if (e < s_e)
2337                                 goto next_entry2;
2338                         if (ipmr_fill_mroute(mrt, skb,
2339                                              NETLINK_CB(cb->skb).portid,
2340                                              cb->nlh->nlmsg_seq,
2341                                              mfc, RTM_NEWROUTE,
2342                                              NLM_F_MULTI) < 0) {
2343                                 spin_unlock_bh(&mfc_unres_lock);
2344                                 goto done;
2345                         }
2346 next_entry2:
2347                         e++;
2348                 }
2349                 spin_unlock_bh(&mfc_unres_lock);
2350                 e = s_e = 0;
2351                 s_h = 0;
2352 next_table:
2353                 t++;
2354         }
2355 done:
2356         rcu_read_unlock();
2357
2358         cb->args[2] = e;
2359         cb->args[1] = h;
2360         cb->args[0] = t;
2361
2362         return skb->len;
2363 }
2364
2365 #ifdef CONFIG_PROC_FS
2366 /* The /proc interfaces to multicast routing:
2367  * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
2368  */
2369 struct ipmr_vif_iter {
2370         struct seq_net_private p;
2371         struct mr_table *mrt;
2372         int ct;
2373 };
2374
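/* Return the pos'th existing VIF for the /proc iterator, or NULL when
 * past the end of the table.
 */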
2375 static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2376                                            struct ipmr_vif_iter *iter,
2377                                            loff_t pos)
2378 {
2379         struct mr_table *mrt = iter->mrt;
2380
2381         for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2382                 if (!VIF_EXISTS(mrt, iter->ct))
2383                         continue;
2384                 if (pos-- == 0)
2385                         return &mrt->vif_table[iter->ct];
2386         }
2387         return NULL;
2388 }
2389
2390 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2391         __acquires(mrt_lock)
2392 {
2393         struct ipmr_vif_iter *iter = seq->private;
2394         struct net *net = seq_file_net(seq);
2395         struct mr_table *mrt;
2396
2397         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2398         if (!mrt)
2399                 return ERR_PTR(-ENOENT);
2400
2401         iter->mrt = mrt;
2402
2403         read_lock(&mrt_lock);
2404         return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
2405                 : SEQ_START_TOKEN;
2406 }
2407
2408 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2409 {
2410         struct ipmr_vif_iter *iter = seq->private;
2411         struct net *net = seq_file_net(seq);
2412         struct mr_table *mrt = iter->mrt;
2413
2414         ++*pos;
2415         if (v == SEQ_START_TOKEN)
2416                 return ipmr_vif_seq_idx(net, iter, 0);
2417
2418         while (++iter->ct < mrt->maxvif) {
2419                 if (!VIF_EXISTS(mrt, iter->ct))
2420                         continue;
2421                 return &mrt->vif_table[iter->ct];
2422         }
2423         return NULL;
2424 }
2425
2426 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2427         __releases(mrt_lock)
2428 {
2429         read_unlock(&mrt_lock);
2430 }
2431
2432 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2433 {
2434         struct ipmr_vif_iter *iter = seq->private;
2435         struct mr_table *mrt = iter->mrt;
2436
2437         if (v == SEQ_START_TOKEN) {
2438                 seq_puts(seq,
2439                          "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
2440         } else {
2441                 const struct vif_device *vif = v;
2442                 const char *name =  vif->dev ? vif->dev->name : "none";
2443
2444                 seq_printf(seq,
2445                            "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
2446                            vif - mrt->vif_table,
2447                            name, vif->bytes_in, vif->pkt_in,
2448                            vif->bytes_out, vif->pkt_out,
2449                            vif->flags, vif->local, vif->remote);
2450         }
2451         return 0;
2452 }
2453
2454 static const struct seq_operations ipmr_vif_seq_ops = {
2455         .start = ipmr_vif_seq_start,
2456         .next  = ipmr_vif_seq_next,
2457         .stop  = ipmr_vif_seq_stop,
2458         .show  = ipmr_vif_seq_show,
2459 };
2460
2461 static int ipmr_vif_open(struct inode *inode, struct file *file)
2462 {
2463         return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2464                             sizeof(struct ipmr_vif_iter));
2465 }
2466
2467 static const struct file_operations ipmr_vif_fops = {
2468         .owner   = THIS_MODULE,
2469         .open    = ipmr_vif_open,
2470         .read    = seq_read,
2471         .llseek  = seq_lseek,
2472         .release = seq_release_net,
2473 };
2474
2475 struct ipmr_mfc_iter {
2476         struct seq_net_private p;
2477         struct mr_table *mrt;
2478         struct list_head *cache;
2479         int ct;
2480 };
2481
2482
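/* Return the pos'th cache entry, walking the resolved hash buckets
 * first and then the unresolved queue. On success the matching lock is
 * left held for the iterator: RCU for resolved entries, mfc_unres_lock
 * for unresolved ones.
 */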
2483 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2484                                           struct ipmr_mfc_iter *it, loff_t pos)
2485 {
2486         struct mr_table *mrt = it->mrt;
2487         struct mfc_cache *mfc;
2488
2489         rcu_read_lock();
2490         for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2491                 it->cache = &mrt->mfc_cache_array[it->ct];
2492                 list_for_each_entry_rcu(mfc, it->cache, list)
2493                         if (pos-- == 0)
2494                                 return mfc;
2495         }
2496         rcu_read_unlock();
2497
2498         spin_lock_bh(&mfc_unres_lock);
2499         it->cache = &mrt->mfc_unres_queue;
2500         list_for_each_entry(mfc, it->cache, list)
2501                 if (pos-- == 0)
2502                         return mfc;
2503         spin_unlock_bh(&mfc_unres_lock);
2504
2505         it->cache = NULL;
2506         return NULL;
2507 }
2508
2509
2510 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2511 {
2512         struct ipmr_mfc_iter *it = seq->private;
2513         struct net *net = seq_file_net(seq);
2514         struct mr_table *mrt;
2515
2516         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2517         if (!mrt)
2518                 return ERR_PTR(-ENOENT);
2519
2520         it->mrt = mrt;
2521         it->cache = NULL;
2522         it->ct = 0;
2523         return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
2524                 : SEQ_START_TOKEN;
2525 }
2526
2527 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2528 {
2529         struct mfc_cache *mfc = v;
2530         struct ipmr_mfc_iter *it = seq->private;
2531         struct net *net = seq_file_net(seq);
2532         struct mr_table *mrt = it->mrt;
2533
2534         ++*pos;
2535
2536         if (v == SEQ_START_TOKEN)
2537                 return ipmr_mfc_seq_idx(net, seq->private, 0);
2538
2539         if (mfc->list.next != it->cache)
2540                 return list_entry(mfc->list.next, struct mfc_cache, list);
2541
2542         if (it->cache == &mrt->mfc_unres_queue)
2543                 goto end_of_list;
2544
2545         BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2546
2547         while (++it->ct < MFC_LINES) {
2548                 it->cache = &mrt->mfc_cache_array[it->ct];
2549                 if (list_empty(it->cache))
2550                         continue;
2551                 return list_first_entry(it->cache, struct mfc_cache, list);
2552         }
2553
2554         /* exhausted cache_array, show unresolved */
2555         rcu_read_unlock();
2556         it->cache = &mrt->mfc_unres_queue;
2557         it->ct = 0;
2558
2559         spin_lock_bh(&mfc_unres_lock);
2560         if (!list_empty(it->cache))
2561                 return list_first_entry(it->cache, struct mfc_cache, list);
2562
2563 end_of_list:
2564         spin_unlock_bh(&mfc_unres_lock);
2565         it->cache = NULL;
2566
2567         return NULL;
2568 }
2569
2570 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2571 {
2572         struct ipmr_mfc_iter *it = seq->private;
2573         struct mr_table *mrt = it->mrt;
2574
2575         if (it->cache == &mrt->mfc_unres_queue)
2576                 spin_unlock_bh(&mfc_unres_lock);
2577         else if (it->cache == &mrt->mfc_cache_array[it->ct])
2578                 rcu_read_unlock();
2579 }
2580
2581 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2582 {
2583         int n;
2584
2585         if (v == SEQ_START_TOKEN) {
2586                 seq_puts(seq,
2587                  "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
2588         } else {
2589                 const struct mfc_cache *mfc = v;
2590                 const struct ipmr_mfc_iter *it = seq->private;
2591                 const struct mr_table *mrt = it->mrt;
2592
2593                 seq_printf(seq, "%08X %08X %-3hd",
2594                            (__force u32) mfc->mfc_mcastgrp,
2595                            (__force u32) mfc->mfc_origin,
2596                            mfc->mfc_parent);
2597
2598                 if (it->cache != &mrt->mfc_unres_queue) {
2599                         seq_printf(seq, " %8lu %8lu %8lu",
2600                                    mfc->mfc_un.res.pkt,
2601                                    mfc->mfc_un.res.bytes,
2602                                    mfc->mfc_un.res.wrong_if);
2603                         for (n = mfc->mfc_un.res.minvif;
2604                              n < mfc->mfc_un.res.maxvif; n++) {
2605                                 if (VIF_EXISTS(mrt, n) &&
2606                                     mfc->mfc_un.res.ttls[n] < 255)
2607                                         seq_printf(seq,
2608                                            " %2d:%-3d",
2609                                            n, mfc->mfc_un.res.ttls[n]);
2610                         }
2611                 } else {
2612                         /* unresolved mfc_caches don't contain
2613                          * pkt, bytes and wrong_if values
2614                          */
2615                         seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2616                 }
2617                 seq_putc(seq, '\n');
2618         }
2619         return 0;
2620 }
2621
2622 static const struct seq_operations ipmr_mfc_seq_ops = {
2623         .start = ipmr_mfc_seq_start,
2624         .next  = ipmr_mfc_seq_next,
2625         .stop  = ipmr_mfc_seq_stop,
2626         .show  = ipmr_mfc_seq_show,
2627 };
2628
2629 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2630 {
2631         return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2632                             sizeof(struct ipmr_mfc_iter));
2633 }
2634
2635 static const struct file_operations ipmr_mfc_fops = {
2636         .owner   = THIS_MODULE,
2637         .open    = ipmr_mfc_open,
2638         .read    = seq_read,
2639         .llseek  = seq_lseek,
2640         .release = seq_release_net,
2641 };
2642 #endif
2643
2644 #ifdef CONFIG_IP_PIMSM_V2
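/* IPPROTO_PIM packets are handed to pim_rcv(); registered from
 * ip_mr_init() below.
 */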
2645 static const struct net_protocol pim_protocol = {
2646         .handler        =       pim_rcv,
2647         .netns_ok       =       1,
2648 };
2649 #endif
2650
2651 /* Setup for IP multicast routing */
2652 static int __net_init ipmr_net_init(struct net *net)
2653 {
2654         int err;
2655
2656         err = ipmr_rules_init(net);
2657         if (err < 0)
2658                 goto fail;
2659
2660 #ifdef CONFIG_PROC_FS
2661         err = -ENOMEM;
2662         if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
2663                 goto proc_vif_fail;
2664         if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
2665                 goto proc_cache_fail;
2666 #endif
2667         return 0;
2668
2669 #ifdef CONFIG_PROC_FS
2670 proc_cache_fail:
2671         remove_proc_entry("ip_mr_vif", net->proc_net);
2672 proc_vif_fail:
2673         ipmr_rules_exit(net);
2674 #endif
2675 fail:
2676         return err;
2677 }
2678
2679 static void __net_exit ipmr_net_exit(struct net *net)
2680 {
2681 #ifdef CONFIG_PROC_FS
2682         remove_proc_entry("ip_mr_cache", net->proc_net);
2683         remove_proc_entry("ip_mr_vif", net->proc_net);
2684 #endif
2685         ipmr_rules_exit(net);
2686 }
2687
2688 static struct pernet_operations ipmr_net_ops = {
2689         .init = ipmr_net_init,
2690         .exit = ipmr_net_exit,
2691 };
2692
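/* Module init: create the cache slab and register the pernet ops, the
 * netdevice notifier, the PIM protocol handler and the rtnetlink dump
 * operation.
 */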
2693 int __init ip_mr_init(void)
2694 {
2695         int err;
2696
2697         mrt_cachep = kmem_cache_create("ip_mrt_cache",
2698                                        sizeof(struct mfc_cache),
2699                                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2700                                        NULL);
2701
2702         err = register_pernet_subsys(&ipmr_net_ops);
2703         if (err)
2704                 goto reg_pernet_fail;
2705
2706         err = register_netdevice_notifier(&ip_mr_notifier);
2707         if (err)
2708                 goto reg_notif_fail;
2709 #ifdef CONFIG_IP_PIMSM_V2
2710         if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2711                 pr_err("%s: can't add PIM protocol\n", __func__);
2712                 err = -EAGAIN;
2713                 goto add_proto_fail;
2714         }
2715 #endif
2716         rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2717                       NULL, ipmr_rtm_dumproute, NULL);
2718         return 0;
2719
2720 #ifdef CONFIG_IP_PIMSM_V2
2721 add_proto_fail:
2722         unregister_netdevice_notifier(&ip_mr_notifier);
2723 #endif
2724 reg_notif_fail:
2725         unregister_pernet_subsys(&ipmr_net_ops);
2726 reg_pernet_fail:
2727         kmem_cache_destroy(mrt_cachep);
2728         return err;
2729 }