/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>

struct sock *mroute6_socket;
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Configuration changes are additionally serialized under rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/*
 *	Multicast router control variables
 */

static struct mif_device vif6_table[MAXMIFS];	/* Devices */
static int maxvif;				/* Highest mif in use + 1 */

#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)
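
/*
 * A minimal illustrative helper (not used elsewhere in this file)
 * showing the read-side locking pattern the data path below relies on:
 * readers take mrt_lock for reading, while configuration changes take
 * it with write_lock_bh().
 */
static inline struct net_device *ip6mr_mif_dev_sketch(int mifi)
{
        struct net_device *dev = NULL;

        read_lock(&mrt_lock);
        if (mifi >= 0 && mifi < maxvif && MIF_EXISTS(mifi))
                dev = vif6_table[mifi].dev;
        read_unlock(&mrt_lock);
        return dev;
}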
static int mroute_do_assert;			/* Set in PIM assert */
#ifdef CONFIG_IPV6_PIMSM_V2
static int mroute_do_pim;
#else
#define mroute_do_pim 0
#endif

static struct mfc6_cache *mfc6_cache_array[MFC6_LINES];	/* Forwarding cache */

static struct mfc6_cache *mfc_unres_queue;	/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;	/* Size of unresolved */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries is
   protected with the strong spinlock mfc_unres_lock.

   In this case the data path is free of exclusive locks entirely.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
        struct mfc6_cache **cache;
        int ct;
};

static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
        struct mfc6_cache *mfc;

        it->cache = mfc6_cache_array;
        read_lock(&mrt_lock);
        for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
                for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
                        if (pos-- == 0)
                                return mfc;
        read_unlock(&mrt_lock);

        it->cache = &mfc_unres_queue;
        spin_lock_bh(&mfc_unres_lock);
        for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
                if (pos-- == 0)
                        return mfc;
        spin_unlock_bh(&mfc_unres_lock);

        it->cache = NULL;
        return NULL;
}
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

struct ipmr_vif_iter {
        int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
                                            loff_t pos)
{
        for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
                if (!MIF_EXISTS(iter->ct))
                        continue;
                if (pos-- == 0)
                        return &vif6_table[iter->ct];
        }
        return NULL;
}
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
        read_lock(&mrt_lock);
        return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
                : SEQ_START_TOKEN);
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ipmr_vif_iter *iter = seq->private;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ip6mr_vif_seq_idx(iter, 0);

        while (++iter->ct < maxvif) {
                if (!MIF_EXISTS(iter->ct))
                        continue;
                return &vif6_table[iter->ct];
        }
        return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
{
        read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
        } else {
                const struct mif_device *vif = v;
                const char *name = vif->dev ? vif->dev->name : "none";

                seq_printf(seq,
                           "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
                           vif - vif6_table,
                           name, vif->bytes_in, vif->pkt_in,
                           vif->bytes_out, vif->pkt_out,
                           vif->flags);
        }
        return 0;
}

static struct seq_operations ip6mr_vif_seq_ops = {
        .start = ip6mr_vif_seq_start,
        .next  = ip6mr_vif_seq_next,
        .stop  = ip6mr_vif_seq_stop,
        .show  = ip6mr_vif_seq_show,
};
static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &ip6mr_vif_seq_ops,
                                sizeof(struct ipmr_vif_iter));
}
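
/*
 * Reading /proc/net/ip6_mr_vif with a single registered mif produces
 * output along these lines (values purely illustrative):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           1500      10      3000      20 00000
 */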
static struct file_operations ip6mr_vif_fops = {
        .owner	 = THIS_MODULE,
        .open	 = ip6mr_vif_open,
        .read	 = seq_read,
        .llseek	 = seq_lseek,
        .release = seq_release_private,	/* matches seq_open_private() above */
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
        return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
                : SEQ_START_TOKEN);
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct mfc6_cache *mfc = v;
        struct ipmr_mfc_iter *it = seq->private;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ipmr_mfc_seq_idx(seq->private, 0);
        if (mfc->next)
                return mfc->next;
        if (it->cache == &mfc_unres_queue)
                goto end_of_list;

        BUG_ON(it->cache != mfc6_cache_array);

        while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
                mfc = mfc6_cache_array[it->ct];
                if (mfc)
                        return mfc;
        }

        /* exhausted cache_array, show unresolved */
        read_unlock(&mrt_lock);
        it->cache = &mfc_unres_queue;
        it->ct = 0;

        spin_lock_bh(&mfc_unres_lock);
        mfc = mfc_unres_queue;
        if (mfc)
                return mfc;

end_of_list:
        spin_unlock_bh(&mfc_unres_lock);
        it->cache = NULL;
        return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
        struct ipmr_mfc_iter *it = seq->private;

        if (it->cache == &mfc_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
        else if (it->cache == mfc6_cache_array)
                read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
        int n;

        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
                         "Group                            "
                         "Origin                           "
                         "Iif      Pkts  Bytes     Wrong  Oifs\n");
        } else {
                const struct mfc6_cache *mfc = v;
                const struct ipmr_mfc_iter *it = seq->private;

                seq_printf(seq,
                           NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
                           NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
                           mfc->mf6c_parent,
                           mfc->mfc_un.res.pkt,
                           mfc->mfc_un.res.bytes,
                           mfc->mfc_un.res.wrong_if);

                if (it->cache != &mfc_unres_queue) {
                        for (n = mfc->mfc_un.res.minvif;
                             n < mfc->mfc_un.res.maxvif; n++) {
                                if (MIF_EXISTS(n) &&
                                    mfc->mfc_un.res.ttls[n] < 255)
                                        seq_printf(seq, " %2d:%-3d",
                                                   n, mfc->mfc_un.res.ttls[n]);
                        }
                }
                seq_putc(seq, '\n');
        }
        return 0;
}
static struct seq_operations ipmr_mfc_seq_ops = {
        .start = ipmr_mfc_seq_start,
        .next  = ipmr_mfc_seq_next,
        .stop  = ipmr_mfc_seq_stop,
        .show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &ipmr_mfc_seq_ops,
                                sizeof(struct ipmr_mfc_iter));
}
static struct file_operations ip6mr_mfc_fops = {
        .owner	 = THIS_MODULE,
        .open	 = ipmr_mfc_open,
        .read	 = seq_read,
        .llseek	 = seq_lseek,
        .release = seq_release_private,	/* matches seq_open_private() above */
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2

static int reg_vif_num = -1;

static int pim6_rcv(struct sk_buff *skb)
{
        struct pimreghdr *pim;
        struct ipv6hdr *encap;
        struct net_device *reg_dev = NULL;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
                goto drop;

        pim = (struct pimreghdr *)skb_transport_header(skb);
        if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
            (pim->flags & PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;

        /* check if the inner packet is destined to a mcast group */
        encap = (struct ipv6hdr *)(skb_transport_header(skb) +
                                   sizeof(*pim));

        if (!ipv6_addr_is_multicast(&encap->daddr) ||
            encap->payload_len == 0 ||
            ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
                goto drop;

        read_lock(&mrt_lock);
        if (reg_vif_num >= 0)
                reg_dev = vif6_table[reg_vif_num].dev;
        if (reg_dev)
                dev_hold(reg_dev);
        read_unlock(&mrt_lock);

        if (reg_dev == NULL)
                goto drop;

        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);
        skb->dev = reg_dev;
        /* the decapsulated packet is IPv6, not IPv4 */
        skb->protocol = htons(ETH_P_IPV6);
        skb->ip_summed = 0;
        skb->pkt_type = PACKET_HOST;
        dst_release(skb->dst);
        reg_dev->stats.rx_bytes += skb->len;
        reg_dev->stats.rx_packets++;
        skb->dst = NULL;
        nf_reset(skb);
        netif_rx(skb);
        dev_put(reg_dev);
        return 0;
drop:
        kfree_skb(skb);
        return 0;
}
static struct inet6_protocol pim6_protocol = {
        .handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */
static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ip6mr_cache_report(skb, reg_vif_num, MRT6MSG_WHOLEPKT);
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return 0;
}
static void reg_vif_setup(struct net_device *dev)
{
        dev->type		= ARPHRD_PIMREG;
        /* 8 == sizeof(struct pimreghdr): room for the PIM register header */
        dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
        dev->flags		= IFF_NOARP;
        dev->hard_start_xmit	= reg_vif_xmit;
        dev->destructor		= free_netdev;
}
static struct net_device *ip6mr_reg_vif(void)
{
        struct net_device *dev;

        dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
        if (dev == NULL)
                return NULL;

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }
        dev->iflink = 0;

        if (dev_open(dev))
                goto failure;

        return dev;

failure:
        /* allow the register to be completed before unregistering. */
        rtnl_unlock();
        rtnl_lock();

        unregister_netdevice(dev);
        return NULL;
}
#endif
static int mif6_delete(int vifi)
{
        struct mif_device *v;
        struct net_device *dev;

        if (vifi < 0 || vifi >= maxvif)
                return -EADDRNOTAVAIL;

        v = &vif6_table[vifi];

        write_lock_bh(&mrt_lock);
        dev = v->dev;
        v->dev = NULL;

        if (!dev) {
                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;
        }

#ifdef CONFIG_IPV6_PIMSM_V2
        if (vifi == reg_vif_num)
                reg_vif_num = -1;
#endif

        if (vifi + 1 == maxvif) {
                int tmp;
                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (MIF_EXISTS(tmp))
                                break;
                }
                maxvif = tmp + 1;
        }

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        if (v->flags & MIFF_REGISTER)
                unregister_netdevice(dev);

        dev_put(dev);
        return 0;
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
        struct sk_buff *skb;

        atomic_dec(&cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
                if (ipv6_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
                        rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
                } else
                        kfree_skb(skb);
        }

        kmem_cache_free(mrt_cachep, c);
}
/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
{
        unsigned long now = jiffies;
        unsigned long expires = 10 * HZ;
        struct mfc6_cache *c, **cp;

        cp = &mfc_unres_queue;

        while ((c = *cp) != NULL) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        /* not yet... */
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)
                                expires = interval;
                        cp = &c->next;
                        continue;
                }

                *cp = c->next;
                ip6mr_destroy_unres(c);
        }

        if (atomic_read(&cache_resolve_queue_len))
                mod_timer(&ipmr_expire_timer, jiffies + expires);
}
static void ipmr_expire_process(unsigned long dummy)
{
        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&ipmr_expire_timer, jiffies + 1);
                return;
        }

        if (atomic_read(&cache_resolve_queue_len))
                ipmr_do_expire_process(dummy);

        spin_unlock(&mfc_unres_lock);
}
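
/*
 * The spin_trylock() above keeps the timer handler from spinning in
 * softirq context: if the unresolved queue is busy, the handler simply
 * re-arms itself one jiffy later and tries again.
 */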
/* Fill the oifs list. Called with mrt_lock held for writing. */

static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
{
        int vifi;

        cache->mfc_un.res.minvif = MAXMIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

        for (vifi = 0; vifi < maxvif; vifi++) {
                if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
                }
        }
}
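
/*
 * Worked example: with maxvif == 4 and ttls == { 0, 2, 0, 3 }, only
 * mifs 1 and 3 are valid output interfaces, so afterwards
 * minvif == 1, maxvif == 4, res.ttls[1] == 2, res.ttls[3] == 3, and
 * every other slot stays at 255, meaning "do not forward".
 */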
static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
        int vifi = vifc->mif6c_mifi;
        struct mif_device *v = &vif6_table[vifi];
        struct net_device *dev;

        /* Is vif busy ? */
        if (MIF_EXISTS(vifi))
                return -EADDRINUSE;

        switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
        case MIFF_REGISTER:
                /*
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (reg_vif_num >= 0)
                        return -EADDRINUSE;
                dev = ip6mr_reg_vif();
                if (!dev)
                        return -ENOBUFS;
                break;
#endif
        case 0:
                dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
                if (!dev)
                        return -EADDRNOTAVAIL;
                dev_put(dev);
                break;
        default:
                return -EINVAL;
        }

        dev_set_allmulti(dev, 1);

        /*
         *	Fill in the VIF structures
         */
        v->rate_limit = vifc->vifc_rate_limit;
        v->flags = vifc->mif6c_flags;
        if (!mrtsock)
                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;
        v->bytes_in = 0;
        v->bytes_out = 0;
        v->pkt_in = 0;
        v->pkt_out = 0;
        v->link = dev->ifindex;
        if (v->flags & MIFF_REGISTER)
                v->link = dev->iflink;

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);
        v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
        if (v->flags & MIFF_REGISTER)
                reg_vif_num = vifi;
#endif
        if (vifi + 1 > maxvif)
                maxvif = vifi + 1;
        write_unlock_bh(&mrt_lock);
        return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
{
        int line = MFC6_HASH(mcastgrp, origin);
        struct mfc6_cache *c;

        for (c = mfc6_cache_array[line]; c; c = c->next) {
                if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
                    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
                        break;
        }
        return c;
}
/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
        struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
        if (c == NULL)
                return NULL;
        memset(c, 0, sizeof(*c));
        c->mfc_un.res.minvif = MAXMIFS;
        return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
        struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
        if (c == NULL)
                return NULL;
        memset(c, 0, sizeof(*c));
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10 * HZ;
        return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
        struct sk_buff *skb;

        /*
         *	Play the pending entries through our router
         */
        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ipv6_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

                        if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
                                nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
                        } else {
                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);
                                ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
                        }
                        rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
                } else
                        ip6_mr_forward(skb, c);
        }
}
/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this
 *	but pim6sd expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
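
/*
 * What pim6sd reads from its raw socket on an upcall is a struct
 * mrt6msg (see linux/mroute6.h), laid out as:
 *
 *	__u8		im6_mbz;	// always 0
 *	__u8		im6_msgtype;	// MRT6MSG_NOCACHE/_WRONGMIF/_WHOLEPKT
 *	__u16		im6_mif;	// mif the packet arrived on
 *	__u32		im6_pad;
 *	struct in6_addr	im6_src, im6_dst;
 */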
static int ip6mr_cache_report(struct sk_buff *pkt, mifi_t mifi, int assert)
{
        struct sk_buff *skb;
        struct mrt6msg *msg;
        int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
        if (assert == MRT6MSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
                                                + sizeof(*msg));
        else
#endif
                skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

        if (!skb)
                return -ENOBUFS;

        /* I suppose that internal messages
         * do not require checksums */

        skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
        if (assert == MRT6MSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                   Duplicate old header, fix length etc.
                   And all this only to mangle msg->im6_msgtype and
                   to set msg->im6_mbz to "mbz" :-)
                 */
                skb_push(skb, -skb_network_offset(pkt));

                skb_push(skb, sizeof(*msg));
                skb_reset_transport_header(skb);
                msg = (struct mrt6msg *)skb_transport_header(skb);
                msg->im6_mbz = 0;
                msg->im6_msgtype = MRT6MSG_WHOLEPKT;
                msg->im6_mif = reg_vif_num;
                msg->im6_pad = 0;
                ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
                ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
#endif
        {
                /*
                 *	Copy the IP header
                 */
                skb_put(skb, sizeof(struct ipv6hdr));
                skb_reset_network_header(skb);
                skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

                /*
                 *	Add our header
                 */
                skb_put(skb, sizeof(*msg));
                skb_reset_transport_header(skb);
                msg = (struct mrt6msg *)skb_transport_header(skb);

                msg->im6_mbz = 0;
                msg->im6_msgtype = assert;
                msg->im6_mif = mifi;
                msg->im6_pad = 0;
                ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
                ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

                skb->dst = dst_clone(pkt->dst);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                skb_pull(skb, sizeof(struct ipv6hdr));
        }
        if (mroute6_socket == NULL) {
                kfree_skb(skb);
                return -EINVAL;
        }

        /*
         *	Deliver to user space multicast routing algorithms
         */
        ret = sock_queue_rcv_skb(mroute6_socket, skb);
        if (ret < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "mroute6: pending queue full, dropping entries.\n");
                kfree_skb(skb);
        }

        return ret;
}
/*
 *	Queue a packet for resolution; the entry is created and the skb
 *	queued under mfc_unres_lock.
 */

static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
{
        int err;
        struct mfc6_cache *c;

        spin_lock_bh(&mfc_unres_lock);
        for (c = mfc_unres_queue; c; c = c->next) {
                if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
                    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
                        break;
        }

        if (c == NULL) {
                /*
                 *	Create a new entry if allowable
                 */
                if (atomic_read(&cache_resolve_queue_len) >= 10 ||
                    (c = ip6mr_cache_alloc_unres()) == NULL) {
                        spin_unlock_bh(&mfc_unres_lock);

                        kfree_skb(skb);
                        return -ENOBUFS;
                }

                /*
                 *	Fill in the new cache entry
                 */
                c->mf6c_parent = -1;
                c->mf6c_origin = ipv6_hdr(skb)->saddr;
                c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

                /*
                 *	Reflect first query at pim6sd
                 */
                err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE);
                if (err < 0) {
                        /* If the report failed throw the cache entry
                           out.
                         */
                        spin_unlock_bh(&mfc_unres_lock);

                        kmem_cache_free(mrt_cachep, c);
                        kfree_skb(skb);
                        return err;
                }

                atomic_inc(&cache_resolve_queue_len);
                c->next = mfc_unres_queue;
                mfc_unres_queue = c;

                ipmr_do_expire_process(1);
        }

        /*
         *	See if we can append the packet
         */
        if (c->mfc_un.unres.unresolved.qlen > 3) {
                kfree_skb(skb);
                err = -ENOBUFS;
        } else {
                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
                err = 0;
        }

        spin_unlock_bh(&mfc_unres_lock);
        return err;
}
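
/*
 * On the receiving end of this upcall, a daemon in the pim6sd role
 * would read the message off its IPPROTO_ICMPV6 raw socket roughly as
 * follows (a hedged userspace sketch, not taken from the pim6sd
 * sources; add_mfc_for() stands in for the daemon's own logic that
 * eventually issues setsockopt(MRT6_ADD_MFC)):
 *
 *	char buf[8192];
 *	int n = read(mrt_sock, buf, sizeof(buf));
 *	struct mrt6msg *m = (struct mrt6msg *)buf;
 *
 *	if (n >= (int)sizeof(*m) && m->im6_mbz == 0 &&
 *	    m->im6_msgtype == MRT6MSG_NOCACHE)
 *		add_mfc_for(m->im6_mif, &m->im6_src, &m->im6_dst);
 */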
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mf6cctl *mfc)
{
        int line;
        struct mfc6_cache *c, **cp;

        line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

        for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
                if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
                    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
                        write_lock_bh(&mrt_lock);
                        *cp = c->next;
                        write_unlock_bh(&mrt_lock);

                        kmem_cache_free(mrt_cachep, c);
                        return 0;
                }
        }
        return -ENOENT;
}
static int ip6mr_device_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;
        struct mif_device *v;
        int ct;

        if (dev_net(dev) != &init_net)
                return NOTIFY_DONE;

        if (event != NETDEV_UNREGISTER)
                return NOTIFY_DONE;

        v = &vif6_table[0];
        for (ct = 0; ct < maxvif; ct++, v++) {
                if (v->dev == dev)
                        mif6_delete(ct);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
        .notifier_call = ip6mr_device_event
};
/*
 *	Setup for IP multicast routing
 */

int __init ip6_mr_init(void)
{
        int err;

        mrt_cachep = kmem_cache_create("ip6_mrt_cache",
                                       sizeof(struct mfc6_cache),
                                       0, SLAB_HWCACHE_ALIGN,
                                       NULL);
        if (!mrt_cachep)
                return -ENOMEM;

        setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
        err = register_netdevice_notifier(&ip6_mr_notifier);
        if (err)
                goto reg_notif_fail;
#ifdef CONFIG_PROC_FS
        err = -ENOMEM;
        if (!proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
                goto proc_vif_fail;
        if (!proc_net_fops_create(&init_net, "ip6_mr_cache",
                                  0, &ip6mr_mfc_fops))
                goto proc_cache_fail;
#endif
        return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
        proc_net_remove(&init_net, "ip6_mr_vif");
proc_vif_fail:
        unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
        del_timer(&ipmr_expire_timer);
        kmem_cache_destroy(mrt_cachep);
        return err;
}
void ip6_mr_cleanup(void)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(&init_net, "ip6_mr_cache");
        proc_net_remove(&init_net, "ip6_mr_vif");
#endif
        unregister_netdevice_notifier(&ip6_mr_notifier);
        del_timer(&ipmr_expire_timer);
        kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
{
        int line;
        struct mfc6_cache *uc, *c, **cp;
        unsigned char ttls[MAXMIFS];
        int i;

        memset(ttls, 255, MAXMIFS);
        for (i = 0; i < MAXMIFS; i++) {
                if (IF_ISSET(i, &mfc->mf6cc_ifset))
                        ttls[i] = 1;
        }

        line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

        for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
                if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
                    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
                        break;
        }

        if (c != NULL) {
                write_lock_bh(&mrt_lock);
                c->mf6c_parent = mfc->mf6cc_parent;
                ip6mr_update_thresholds(c, ttls);
                if (!mrtsock)
                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);
                return 0;
        }

        if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
                return -EINVAL;

        c = ip6mr_cache_alloc();
        if (c == NULL)
                return -ENOMEM;

        c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
        c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
        c->mf6c_parent = mfc->mf6cc_parent;
        ip6mr_update_thresholds(c, ttls);
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;

        write_lock_bh(&mrt_lock);
        c->next = mfc6_cache_array[line];
        mfc6_cache_array[line] = c;
        write_unlock_bh(&mrt_lock);

        /*
         *	Check to see if we resolved a queued list. If so we
         *	need to send on the frames and tidy up.
         */
        spin_lock_bh(&mfc_unres_lock);
        for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
             cp = &uc->next) {
                if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
                    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
                        *cp = uc->next;
                        if (atomic_dec_and_test(&cache_resolve_queue_len))
                                del_timer(&ipmr_expire_timer);
                        break;
                }
        }
        spin_unlock_bh(&mfc_unres_lock);

        if (uc) {
                ip6mr_cache_resolve(uc, c);
                kmem_cache_free(mrt_cachep, uc);
        }
        return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
        int i;

        /*
         *	Shut down all active vif entries
         */
        for (i = 0; i < maxvif; i++) {
                if (!(vif6_table[i].flags & VIFF_STATIC))
                        mif6_delete(i);
        }

        /*
         *	Wipe the cache
         */
        for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
                struct mfc6_cache *c, **cp;

                cp = &mfc6_cache_array[i];
                while ((c = *cp) != NULL) {
                        if (c->mfc_flags & MFC_STATIC) {
                                cp = &c->next;
                                continue;
                        }
                        write_lock_bh(&mrt_lock);
                        *cp = c->next;
                        write_unlock_bh(&mrt_lock);

                        kmem_cache_free(mrt_cachep, c);
                }
        }

        if (atomic_read(&cache_resolve_queue_len) != 0) {
                struct mfc6_cache *c;

                spin_lock_bh(&mfc_unres_lock);
                while (mfc_unres_queue != NULL) {
                        c = mfc_unres_queue;
                        mfc_unres_queue = c->next;
                        spin_unlock_bh(&mfc_unres_lock);

                        ip6mr_destroy_unres(c);

                        spin_lock_bh(&mfc_unres_lock);
                }
                spin_unlock_bh(&mfc_unres_lock);
        }
}
static int ip6mr_sk_init(struct sock *sk)
{
        int err = 0;

        rtnl_lock();
        write_lock_bh(&mrt_lock);
        if (likely(mroute6_socket == NULL))
                mroute6_socket = sk;
        else
                err = -EADDRINUSE;
        write_unlock_bh(&mrt_lock);

        rtnl_unlock();

        return err;
}

int ip6mr_sk_done(struct sock *sk)
{
        int err = 0;

        rtnl_lock();
        if (sk == mroute6_socket) {
                write_lock_bh(&mrt_lock);
                mroute6_socket = NULL;
                write_unlock_bh(&mrt_lock);

                mroute_clean_tables(sk);
        } else
                err = -EACCES;
        rtnl_unlock();

        return err;
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
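
/*
 * For reference, the call sequence a routing daemon is expected to
 * perform (a hedged sketch; error handling omitted, if_index and the
 * mf6cctl contents are invented for illustration):
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int on = 1;
 *	struct mif6ctl mif = { .mif6c_mifi = 0, .mif6c_pifi = if_index };
 *	struct mf6cctl mfc = { .mf6cc_parent = 0 };	// plus addresses/ifset
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT,    &on,  sizeof(on));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE,    &on,  sizeof(on));
 */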
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
        int ret;
        struct mif6ctl vif;
        struct mf6cctl mfc;
        mifi_t mifi;

        if (optname != MRT6_INIT) {
                if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
                        return -EACCES;
        }

        switch (optname) {
        case MRT6_INIT:
                if (sk->sk_type != SOCK_RAW ||
                    inet_sk(sk)->num != IPPROTO_ICMPV6)
                        return -EOPNOTSUPP;
                if (optlen < sizeof(int))
                        return -EINVAL;

                return ip6mr_sk_init(sk);

        case MRT6_DONE:
                return ip6mr_sk_done(sk);

        case MRT6_ADD_MIF:
                if (optlen < sizeof(vif))
                        return -EINVAL;
                if (copy_from_user(&vif, optval, sizeof(vif)))
                        return -EFAULT;
                if (vif.mif6c_mifi >= MAXMIFS)
                        return -ENFILE;
                rtnl_lock();
                ret = mif6_add(&vif, sk == mroute6_socket);
                rtnl_unlock();
                return ret;

        case MRT6_DEL_MIF:
                if (optlen < sizeof(mifi_t))
                        return -EINVAL;
                if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
                        return -EFAULT;
                rtnl_lock();
                ret = mif6_delete(mifi);
                rtnl_unlock();
                return ret;

        /*
         *	Manipulate the forwarding caches. These live
         *	in a sort of kernel/user symbiosis.
         */
        case MRT6_ADD_MFC:
        case MRT6_DEL_MFC:
                if (optlen < sizeof(mfc))
                        return -EINVAL;
                if (copy_from_user(&mfc, optval, sizeof(mfc)))
                        return -EFAULT;
                rtnl_lock();
                if (optname == MRT6_DEL_MFC)
                        ret = ip6mr_mfc_delete(&mfc);
                else
                        ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
                rtnl_unlock();
                return ret;

        /*
         *	Control PIM assert (enabling PIM also enables asserts).
         */
        case MRT6_ASSERT:
        {
                int v;

                if (get_user(v, (int __user *)optval))
                        return -EFAULT;
                mroute_do_assert = !!v;
                return 0;
        }

#ifdef CONFIG_IPV6_PIMSM_V2
        case MRT6_PIM:
        {
                int v;

                if (get_user(v, (int __user *)optval))
                        return -EFAULT;
                v = !!v;
                rtnl_lock();
                ret = 0;
                if (v != mroute_do_pim) {
                        mroute_do_pim = v;
                        mroute_do_assert = v;
                        if (mroute_do_pim)
                                ret = inet6_add_protocol(&pim6_protocol,
                                                         IPPROTO_PIM);
                        else
                                ret = inet6_del_protocol(&pim6_protocol,
                                                         IPPROTO_PIM);
                        if (ret < 0)
                                ret = -EAGAIN;
                }
                rtnl_unlock();
                return ret;
        }
#endif
        /*
         *	Spurious command, or MRT6_VERSION which you cannot set.
         */
        default:
                return -ENOPROTOOPT;
        }
}
/*
 *	getsockopt() support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
                          int __user *optlen)
{
        int olr;
        int val;

        switch (optname) {
#ifdef CONFIG_IPV6_PIMSM_V2
        case MRT6_PIM:
                val = mroute_do_pim;
                break;
#endif
        case MRT6_ASSERT:
                val = mroute_do_assert;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (get_user(olr, optlen))
                return -EFAULT;

        olr = min_t(int, olr, sizeof(int));
        if (olr < 0)
                return -EINVAL;

        if (put_user(olr, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, olr))
                return -EFAULT;
        return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
        struct sioc_sg_req6 sr;
        struct sioc_mif_req6 vr;
        struct mif_device *vif;
        struct mfc6_cache *c;

        switch (cmd) {
        case SIOCGETMIFCNT_IN6:
                if (copy_from_user(&vr, arg, sizeof(vr)))
                        return -EFAULT;
                if (vr.mifi >= maxvif)
                        return -EINVAL;
                read_lock(&mrt_lock);
                vif = &vif6_table[vr.mifi];
                if (MIF_EXISTS(vr.mifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        case SIOCGETSGCNT_IN6:
                if (copy_from_user(&sr, arg, sizeof(sr)))
                        return -EFAULT;

                read_lock(&mrt_lock);
                c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
                if (c) {
                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &sr, sizeof(sr)))
                                return -EFAULT;
                        return 0;
                }
                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;
        default:
                return -ENOIOCTLCMD;
        }
}
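
/*
 * Example (sketch): per-flow counters can be polled without /proc via
 * SIOCGETSGCNT_IN6; src_sa and grp_sa are previously filled
 * struct sockaddr_in6 values:
 *
 *	struct sioc_sg_req6 sr = { .src = src_sa, .grp = grp_sa };
 *
 *	if (ioctl(s, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("%lu pkts, %lu bytes\n", sr.pktcnt, sr.bytecnt);
 */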
static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        return dst_output(skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
        struct ipv6hdr *ipv6h;
        struct mif_device *vif = &vif6_table[vifi];
        struct net_device *dev;
        struct dst_entry *dst;
        struct flowi fl;

        if (vif->dev == NULL)
                goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
        if (vif->flags & MIFF_REGISTER) {
                vif->pkt_out++;
                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ip6mr_cache_report(skb, vifi, MRT6MSG_WHOLEPKT);
                kfree_skb(skb);
                return 0;
        }
#endif

        ipv6h = ipv6_hdr(skb);

        fl = (struct flowi) {
                .oif = vif->link,
                .nl_u = { .ip6_u =
                          { .daddr = ipv6h->daddr, }
                }
        };

        dst = ip6_route_output(&init_net, NULL, &fl);
        if (!dst)
                goto out_free;

        dst_release(skb->dst);
        skb->dst = dst;

        /*
         * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
         * locally not only before forwarding, but also after forwarding on
         * all output interfaces: if the mrouter runs a multicast
         * application, that application should receive the packets no
         * matter which interface it joined on. Otherwise it would have to
         * join on every interface, while a multihomed host (or a router
         * that is not an mrouter) cannot join a group on more than one
         * interface without receiving duplicate packets.
         */
        dev = vif->dev;
        skb->dev = dev;
        vif->pkt_out++;
        vif->bytes_out += skb->len;

        /* We are about to write */
        /* XXX: extension headers? */
        if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
                goto out_free;

        ipv6h = ipv6_hdr(skb);
        ipv6h->hop_limit--;

        IP6CB(skb)->flags |= IP6SKB_FORWARDED;

        return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
                       ip6mr_forward2_finish);

out_free:
        kfree_skb(skb);
        return 0;
}
static int ip6mr_find_vif(struct net_device *dev)
{
        int ct;
        for (ct = maxvif - 1; ct >= 0; ct--) {
                if (vif6_table[ct].dev == dev)
                        break;
        }
        return ct;
}
static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
        int psend = -1;
        int vif, ct;

        vif = cache->mf6c_parent;
        cache->mfc_un.res.pkt++;
        cache->mfc_un.res.bytes += skb->len;

        /*
         * Wrong interface: drop packet and (maybe) send PIM assert.
         */
        if (vif6_table[vif].dev != skb->dev) {
                int true_vifi;

                cache->mfc_un.res.wrong_if++;
                true_vifi = ip6mr_find_vif(skb->dev);

                if (true_vifi >= 0 && mroute_do_assert &&
                    /* PIM-SM uses asserts when switching from the RPT to
                       the SPT, so we cannot check that a packet arrived
                       on an oif. That is unfortunate, but otherwise we
                       would need to move a pretty large chunk of pimd
                       into the kernel. --ANK
                     */
                    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
                    time_after(jiffies,
                               cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
                        cache->mfc_un.res.last_assert = jiffies;
                        ip6mr_cache_report(skb, true_vifi, MRT6MSG_WRONGMIF);
                }
                goto dont_forward;
        }

        vif6_table[vif].pkt_in++;
        vif6_table[vif].bytes_in += skb->len;

        /*
         *	Forward the frame
         */
        for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
                if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
                        if (psend != -1) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        ip6mr_forward2(skb2, cache, psend);
                        }
                        psend = ct;
                }
        }
        if (psend != -1) {
                ip6mr_forward2(skb, cache, psend);
                return 0;
        }

dont_forward:
        kfree_skb(skb);
        return 0;
}
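
/*
 * Note the forwarding loop above: it walks the oifs from high to low,
 * sending a clone for every eligible interface except the last one
 * found; that final interface consumes the original skb itself,
 * saving one skb_clone() per forwarded packet.
 */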
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
        struct mfc6_cache *cache;

        read_lock(&mrt_lock);
        cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

        /*
         *	No usable cache entry
         */
        if (cache == NULL) {
                int vif;

                vif = ip6mr_find_vif(skb->dev);
                if (vif >= 0) {
                        int err = ip6mr_cache_unresolved(vif, skb);
                        read_unlock(&mrt_lock);

                        return err;
                }
                read_unlock(&mrt_lock);
                kfree_skb(skb);
                return -ENODEV;
        }

        ip6_mr_forward(skb, cache);

        read_unlock(&mrt_lock);

        return 0;
}
static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
{
        int ct;
        struct rtnexthop *nhp;
        struct net_device *dev = vif6_table[c->mf6c_parent].dev;
        u8 *b = skb_tail_pointer(skb);
        struct rtattr *mp_head;

        if (dev)
                RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

        mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
                if (c->mfc_un.res.ttls[ct] < 255) {
                        if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                                goto rtattr_failure;
                        nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
                        nhp->rtnh_flags = 0;
                        nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
                        nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
                        nhp->rtnh_len = sizeof(*nhp);
                }
        }
        mp_head->rta_type = RTA_MULTIPATH;
        mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
        rtm->rtm_type = RTN_MULTICAST;
        return 1;

rtattr_failure:
        nlmsg_trim(skb, b);
        return -EMSGSIZE;
}
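
/*
 * The dump encodes the oif list as an RTA_MULTIPATH attribute: one
 * struct rtnexthop per output interface, rtnh_ifindex naming the
 * device and rtnh_hops (re)used to carry the TTL threshold.
 */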
int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
        int err;
        struct mfc6_cache *cache;
        struct rt6_info *rt = (struct rt6_info *)skb->dst;

        read_lock(&mrt_lock);
        cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

        if (!cache) {
                struct sk_buff *skb2;
                struct ipv6hdr *iph;
                struct net_device *dev;
                int vif;

                if (nowait) {
                        read_unlock(&mrt_lock);
                        return -EAGAIN;
                }

                dev = skb->dev;
                if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
                        read_unlock(&mrt_lock);
                        return -ENODEV;
                }

                /* really correct? */
                skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
                if (!skb2) {
                        read_unlock(&mrt_lock);
                        return -ENOMEM;
                }

                skb_reset_transport_header(skb2);

                skb_put(skb2, sizeof(struct ipv6hdr));
                skb_reset_network_header(skb2);

                iph = ipv6_hdr(skb2);
                /* version 0 marks this skb as a netlink request for
                   ip6mr_cache_resolve(), not as a real IPv6 packet */
                iph->version = 0;
                iph->priority = 0;
                iph->flow_lbl[0] = 0;
                iph->flow_lbl[1] = 0;
                iph->flow_lbl[2] = 0;
                iph->payload_len = 0;
                iph->nexthdr = IPPROTO_NONE;
                ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
                ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

                err = ip6mr_cache_unresolved(vif, skb2);
                read_unlock(&mrt_lock);

                return err;
        }

        if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
                cache->mfc_flags |= MFC_NOTIFY;

        err = ip6mr_fill_mroute(skb, cache, rtm);
        read_unlock(&mrt_lock);
        return err;
}