2 * IP multicast routing support for mrouted 3.6/3.8
4 * (c) 1995 Alan Cox, <alan@redhat.com>
5 * Linux Consultancy and Custom Driver Development
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
12 * Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
15 * Michael Chastain : Incorrect size of copying.
16 * Alan Cox : Added the cache manager code
17 * Alan Cox : Fixed the clone/copy bug and device race.
18 * Mike McLagan : Routing by source
19 * Malcolm Beattie : Buffer handling fixes.
20 * Alexey Kuznetsov : Double buffer free and other fixes.
21 * SVR Anand : Fixed several multicast bugs and problems.
22 * Alexey Kuznetsov : Status, optimisations and more.
23 * Brad Parker : Better behaviour on mrouted upcall
25 * Carlos Picoto : PIMv1 Support
26 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
27 * Relax this requrement to work with older peers.
31 #include <asm/system.h>
32 #include <asm/uaccess.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/timer.h>
38 #include <linux/kernel.h>
39 #include <linux/fcntl.h>
40 #include <linux/stat.h>
41 #include <linux/socket.h>
43 #include <linux/inet.h>
44 #include <linux/netdevice.h>
45 #include <linux/inetdevice.h>
46 #include <linux/igmp.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/mroute.h>
50 #include <linux/init.h>
51 #include <linux/if_ether.h>
53 #include <net/protocol.h>
54 #include <linux/skbuff.h>
55 #include <net/route.h>
60 #include <linux/notifier.h>
61 #include <linux/if_arp.h>
62 #include <linux/netfilter_ipv4.h>
64 #include <net/checksum.h>
65 #include <net/netlink.h>
67 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
68 #define CONFIG_IP_PIMSM 1
71 static struct sock *mroute_socket;
74 /* Big lock, protecting vif table, mrt cache and mroute socket state.
75 Note that the changes are semaphored via rtnl_lock.
78 static DEFINE_RWLOCK(mrt_lock);
81 * Multicast router control variables
84 static struct vif_device vif_table[MAXVIFS]; /* Devices */
87 #define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
89 static int mroute_do_assert; /* Set in PIM assert */
90 static int mroute_do_pim;
92 static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
94 static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
95 static atomic_t cache_resolve_queue_len; /* Size of unresolved */
97 /* Special spinlock for queue of unresolved entries */
98 static DEFINE_SPINLOCK(mfc_unres_lock);
100 /* We return to original Alan's scheme. Hash table of resolved
101 entries is changed only in process context and protected
102 with weak lock mrt_lock. Queue of unresolved entries is protected
103 with strong spinlock mfc_unres_lock.
105 In this case data path is free of exclusive locks at all.
108 static struct kmem_cache *mrt_cachep __read_mostly;
110 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
111 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
112 static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
114 #ifdef CONFIG_IP_PIMSM_V2
115 static struct net_protocol pim_protocol;
118 static struct timer_list ipmr_expire_timer;
120 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
123 struct net_device *ipmr_new_tunnel(struct vifctl *v)
125 struct net_device *dev;
127 dev = __dev_get_by_name("tunl0");
133 struct ip_tunnel_parm p;
134 struct in_device *in_dev;
136 memset(&p, 0, sizeof(p));
137 p.iph.daddr = v->vifc_rmt_addr.s_addr;
138 p.iph.saddr = v->vifc_lcl_addr.s_addr;
141 p.iph.protocol = IPPROTO_IPIP;
142 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
143 ifr.ifr_ifru.ifru_data = (void*)&p;
145 oldfs = get_fs(); set_fs(KERNEL_DS);
146 err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
151 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
152 dev->flags |= IFF_MULTICAST;
154 in_dev = __in_dev_get_rtnl(dev);
155 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
157 in_dev->cnf.rp_filter = 0;
166 /* allow the register to be completed before unregistering. */
170 unregister_netdevice(dev);
174 #ifdef CONFIG_IP_PIMSM
176 static int reg_vif_num = -1;
178 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
180 read_lock(&mrt_lock);
181 ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
182 ((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
183 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
184 read_unlock(&mrt_lock);
189 static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
191 return (struct net_device_stats*)netdev_priv(dev);
194 static void reg_vif_setup(struct net_device *dev)
196 dev->type = ARPHRD_PIMREG;
197 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
198 dev->flags = IFF_NOARP;
199 dev->hard_start_xmit = reg_vif_xmit;
200 dev->get_stats = reg_vif_get_stats;
201 dev->destructor = free_netdev;
204 static struct net_device *ipmr_reg_vif(void)
206 struct net_device *dev;
207 struct in_device *in_dev;
209 dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg",
215 if (register_netdevice(dev)) {
221 if ((in_dev = inetdev_init(dev)) == NULL)
224 in_dev->cnf.rp_filter = 0;
232 /* allow the register to be completed before unregistering. */
236 unregister_netdevice(dev);
245 static int vif_delete(int vifi)
247 struct vif_device *v;
248 struct net_device *dev;
249 struct in_device *in_dev;
251 if (vifi < 0 || vifi >= maxvif)
252 return -EADDRNOTAVAIL;
254 v = &vif_table[vifi];
256 write_lock_bh(&mrt_lock);
261 write_unlock_bh(&mrt_lock);
262 return -EADDRNOTAVAIL;
265 #ifdef CONFIG_IP_PIMSM
266 if (vifi == reg_vif_num)
270 if (vifi+1 == maxvif) {
272 for (tmp=vifi-1; tmp>=0; tmp--) {
279 write_unlock_bh(&mrt_lock);
281 dev_set_allmulti(dev, -1);
283 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
284 in_dev->cnf.mc_forwarding--;
285 ip_rt_multicast_event(in_dev);
288 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
289 unregister_netdevice(dev);
295 /* Destroy an unresolved cache entry, killing queued skbs
296 and reporting error to netlink readers.
299 static void ipmr_destroy_unres(struct mfc_cache *c)
304 atomic_dec(&cache_resolve_queue_len);
306 while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
307 if (ip_hdr(skb)->version == 0) {
308 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
309 nlh->nlmsg_type = NLMSG_ERROR;
310 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
311 skb_trim(skb, nlh->nlmsg_len);
313 e->error = -ETIMEDOUT;
314 memset(&e->msg, 0, sizeof(e->msg));
316 rtnl_unicast(skb, NETLINK_CB(skb).pid);
321 kmem_cache_free(mrt_cachep, c);
325 /* Single timer process for all the unresolved queue. */
327 static void ipmr_expire_process(unsigned long dummy)
330 unsigned long expires;
331 struct mfc_cache *c, **cp;
333 if (!spin_trylock(&mfc_unres_lock)) {
334 mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
338 if (atomic_read(&cache_resolve_queue_len) == 0)
343 cp = &mfc_unres_queue;
345 while ((c=*cp) != NULL) {
346 if (time_after(c->mfc_un.unres.expires, now)) {
347 unsigned long interval = c->mfc_un.unres.expires - now;
348 if (interval < expires)
356 ipmr_destroy_unres(c);
359 if (atomic_read(&cache_resolve_queue_len))
360 mod_timer(&ipmr_expire_timer, jiffies + expires);
363 spin_unlock(&mfc_unres_lock);
366 /* Fill oifs list. It is called under write locked mrt_lock. */
368 static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
372 cache->mfc_un.res.minvif = MAXVIFS;
373 cache->mfc_un.res.maxvif = 0;
374 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
376 for (vifi=0; vifi<maxvif; vifi++) {
377 if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
378 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
379 if (cache->mfc_un.res.minvif > vifi)
380 cache->mfc_un.res.minvif = vifi;
381 if (cache->mfc_un.res.maxvif <= vifi)
382 cache->mfc_un.res.maxvif = vifi + 1;
387 static int vif_add(struct vifctl *vifc, int mrtsock)
389 int vifi = vifc->vifc_vifi;
390 struct vif_device *v = &vif_table[vifi];
391 struct net_device *dev;
392 struct in_device *in_dev;
395 if (VIF_EXISTS(vifi))
398 switch (vifc->vifc_flags) {
399 #ifdef CONFIG_IP_PIMSM
402 * Special Purpose VIF in PIM
403 * All the packets will be sent to the daemon
405 if (reg_vif_num >= 0)
407 dev = ipmr_reg_vif();
413 dev = ipmr_new_tunnel(vifc);
418 dev = ip_dev_find(vifc->vifc_lcl_addr.s_addr);
420 return -EADDRNOTAVAIL;
427 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
428 return -EADDRNOTAVAIL;
429 in_dev->cnf.mc_forwarding++;
430 dev_set_allmulti(dev, +1);
431 ip_rt_multicast_event(in_dev);
434 * Fill in the VIF structures
436 v->rate_limit=vifc->vifc_rate_limit;
437 v->local=vifc->vifc_lcl_addr.s_addr;
438 v->remote=vifc->vifc_rmt_addr.s_addr;
439 v->flags=vifc->vifc_flags;
441 v->flags |= VIFF_STATIC;
442 v->threshold=vifc->vifc_threshold;
447 v->link = dev->ifindex;
448 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
449 v->link = dev->iflink;
451 /* And finish update writing critical data */
452 write_lock_bh(&mrt_lock);
455 #ifdef CONFIG_IP_PIMSM
456 if (v->flags&VIFF_REGISTER)
461 write_unlock_bh(&mrt_lock);
465 static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
467 int line=MFC_HASH(mcastgrp,origin);
470 for (c=mfc_cache_array[line]; c; c = c->next) {
471 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
478 * Allocate a multicast cache entry
480 static struct mfc_cache *ipmr_cache_alloc(void)
482 struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
485 c->mfc_un.res.minvif = MAXVIFS;
489 static struct mfc_cache *ipmr_cache_alloc_unres(void)
491 struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
494 skb_queue_head_init(&c->mfc_un.unres.unresolved);
495 c->mfc_un.unres.expires = jiffies + 10*HZ;
500 * A cache entry has gone into a resolved state from queued
503 static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
509 * Play the pending entries through our router
512 while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
513 if (ip_hdr(skb)->version == 0) {
514 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
516 if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
517 nlh->nlmsg_len = (skb_tail_pointer(skb) -
520 nlh->nlmsg_type = NLMSG_ERROR;
521 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
522 skb_trim(skb, nlh->nlmsg_len);
524 e->error = -EMSGSIZE;
525 memset(&e->msg, 0, sizeof(e->msg));
528 rtnl_unicast(skb, NETLINK_CB(skb).pid);
530 ip_mr_forward(skb, c, 0);
535 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
536 * expects the following bizarre scheme.
538 * Called under mrt_lock.
541 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
544 const int ihl = ip_hdrlen(pkt);
545 struct igmphdr *igmp;
549 #ifdef CONFIG_IP_PIMSM
550 if (assert == IGMPMSG_WHOLEPKT)
551 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
554 skb = alloc_skb(128, GFP_ATOMIC);
559 #ifdef CONFIG_IP_PIMSM
560 if (assert == IGMPMSG_WHOLEPKT) {
561 /* Ugly, but we have no choice with this interface.
562 Duplicate old header, fix ihl, length etc.
563 And all this only to mangle msg->im_msgtype and
564 to set msg->im_mbz to "mbz" :-)
566 skb_push(skb, sizeof(struct iphdr));
567 skb_reset_network_header(skb);
568 skb_reset_transport_header(skb);
569 msg = (struct igmpmsg *)skb_network_header(skb);
570 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
571 msg->im_msgtype = IGMPMSG_WHOLEPKT;
573 msg->im_vif = reg_vif_num;
574 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
575 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
576 sizeof(struct iphdr));
585 skb->network_header = skb->tail;
587 skb_copy_to_linear_data(skb, pkt->data, ihl);
588 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
589 msg = (struct igmpmsg *)skb_network_header(skb);
591 skb->dst = dst_clone(pkt->dst);
597 igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
599 msg->im_msgtype = assert;
601 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
602 skb->transport_header = skb->network_header;
605 if (mroute_socket == NULL) {
613 if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
615 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
623 * Queue a packet for resolution. It gets locked cache entry!
627 ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
631 const struct iphdr *iph = ip_hdr(skb);
633 spin_lock_bh(&mfc_unres_lock);
634 for (c=mfc_unres_queue; c; c=c->next) {
635 if (c->mfc_mcastgrp == iph->daddr &&
636 c->mfc_origin == iph->saddr)
642 * Create a new entry if allowable
645 if (atomic_read(&cache_resolve_queue_len)>=10 ||
646 (c=ipmr_cache_alloc_unres())==NULL) {
647 spin_unlock_bh(&mfc_unres_lock);
654 * Fill in the new cache entry
657 c->mfc_origin = iph->saddr;
658 c->mfc_mcastgrp = iph->daddr;
661 * Reflect first query at mrouted.
663 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
664 /* If the report failed throw the cache entry
667 spin_unlock_bh(&mfc_unres_lock);
669 kmem_cache_free(mrt_cachep, c);
674 atomic_inc(&cache_resolve_queue_len);
675 c->next = mfc_unres_queue;
678 mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
682 * See if we can append the packet
684 if (c->mfc_un.unres.unresolved.qlen>3) {
688 skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
692 spin_unlock_bh(&mfc_unres_lock);
697 * MFC cache manipulation by user space mroute daemon
700 static int ipmr_mfc_delete(struct mfcctl *mfc)
703 struct mfc_cache *c, **cp;
705 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
707 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
708 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
709 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
710 write_lock_bh(&mrt_lock);
712 write_unlock_bh(&mrt_lock);
714 kmem_cache_free(mrt_cachep, c);
721 static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
724 struct mfc_cache *uc, *c, **cp;
726 line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
728 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
729 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
730 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
735 write_lock_bh(&mrt_lock);
736 c->mfc_parent = mfc->mfcc_parent;
737 ipmr_update_thresholds(c, mfc->mfcc_ttls);
739 c->mfc_flags |= MFC_STATIC;
740 write_unlock_bh(&mrt_lock);
744 if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
747 c=ipmr_cache_alloc();
751 c->mfc_origin=mfc->mfcc_origin.s_addr;
752 c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
753 c->mfc_parent=mfc->mfcc_parent;
754 ipmr_update_thresholds(c, mfc->mfcc_ttls);
756 c->mfc_flags |= MFC_STATIC;
758 write_lock_bh(&mrt_lock);
759 c->next = mfc_cache_array[line];
760 mfc_cache_array[line] = c;
761 write_unlock_bh(&mrt_lock);
764 * Check to see if we resolved a queued list. If so we
765 * need to send on the frames and tidy up.
767 spin_lock_bh(&mfc_unres_lock);
768 for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
770 if (uc->mfc_origin == c->mfc_origin &&
771 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
773 if (atomic_dec_and_test(&cache_resolve_queue_len))
774 del_timer(&ipmr_expire_timer);
778 spin_unlock_bh(&mfc_unres_lock);
781 ipmr_cache_resolve(uc, c);
782 kmem_cache_free(mrt_cachep, uc);
788 * Close the multicast socket, and clear the vif tables etc
791 static void mroute_clean_tables(struct sock *sk)
796 * Shut down all active vif entries
798 for (i=0; i<maxvif; i++) {
799 if (!(vif_table[i].flags&VIFF_STATIC))
806 for (i=0;i<MFC_LINES;i++) {
807 struct mfc_cache *c, **cp;
809 cp = &mfc_cache_array[i];
810 while ((c = *cp) != NULL) {
811 if (c->mfc_flags&MFC_STATIC) {
815 write_lock_bh(&mrt_lock);
817 write_unlock_bh(&mrt_lock);
819 kmem_cache_free(mrt_cachep, c);
823 if (atomic_read(&cache_resolve_queue_len) != 0) {
826 spin_lock_bh(&mfc_unres_lock);
827 while (mfc_unres_queue != NULL) {
829 mfc_unres_queue = c->next;
830 spin_unlock_bh(&mfc_unres_lock);
832 ipmr_destroy_unres(c);
834 spin_lock_bh(&mfc_unres_lock);
836 spin_unlock_bh(&mfc_unres_lock);
840 static void mrtsock_destruct(struct sock *sk)
843 if (sk == mroute_socket) {
844 ipv4_devconf.mc_forwarding--;
846 write_lock_bh(&mrt_lock);
848 write_unlock_bh(&mrt_lock);
850 mroute_clean_tables(sk);
856 * Socket options and virtual interface manipulation. The whole
857 * virtual interface system is a complete heap, but unfortunately
858 * that's how BSD mrouted happens to think. Maybe one day with a proper
859 * MOSPF/PIM router set up we can clean this up.
862 int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
868 if (optname != MRT_INIT) {
869 if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
875 if (sk->sk_type != SOCK_RAW ||
876 inet_sk(sk)->num != IPPROTO_IGMP)
878 if (optlen!=sizeof(int))
887 ret = ip_ra_control(sk, 1, mrtsock_destruct);
889 write_lock_bh(&mrt_lock);
891 write_unlock_bh(&mrt_lock);
893 ipv4_devconf.mc_forwarding++;
898 if (sk!=mroute_socket)
900 return ip_ra_control(sk, 0, NULL);
903 if (optlen!=sizeof(vif))
905 if (copy_from_user(&vif,optval,sizeof(vif)))
907 if (vif.vifc_vifi >= MAXVIFS)
910 if (optname==MRT_ADD_VIF) {
911 ret = vif_add(&vif, sk==mroute_socket);
913 ret = vif_delete(vif.vifc_vifi);
919 * Manipulate the forwarding caches. These live
920 * in a sort of kernel/user symbiosis.
924 if (optlen!=sizeof(mfc))
926 if (copy_from_user(&mfc,optval, sizeof(mfc)))
929 if (optname==MRT_DEL_MFC)
930 ret = ipmr_mfc_delete(&mfc);
932 ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
936 * Control PIM assert.
941 if (get_user(v,(int __user *)optval))
943 mroute_do_assert=(v)?1:0;
946 #ifdef CONFIG_IP_PIMSM
950 if (get_user(v,(int __user *)optval))
955 if (v != mroute_do_pim) {
957 mroute_do_assert = v;
958 #ifdef CONFIG_IP_PIMSM_V2
960 ret = inet_add_protocol(&pim_protocol,
963 ret = inet_del_protocol(&pim_protocol,
974 * Spurious command, or MRT_VERSION which you cannot
983 * Getsock opt support for the multicast routing system.
986 int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
991 if (optname!=MRT_VERSION &&
992 #ifdef CONFIG_IP_PIMSM
998 if (get_user(olr, optlen))
1001 olr = min_t(unsigned int, olr, sizeof(int));
1005 if (put_user(olr,optlen))
1007 if (optname==MRT_VERSION)
1009 #ifdef CONFIG_IP_PIMSM
1010 else if (optname==MRT_PIM)
1014 val=mroute_do_assert;
1015 if (copy_to_user(optval,&val,olr))
1021 * The IP multicast ioctl support routines.
1024 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1026 struct sioc_sg_req sr;
1027 struct sioc_vif_req vr;
1028 struct vif_device *vif;
1029 struct mfc_cache *c;
1033 if (copy_from_user(&vr,arg,sizeof(vr)))
1035 if (vr.vifi>=maxvif)
1037 read_lock(&mrt_lock);
1038 vif=&vif_table[vr.vifi];
1039 if (VIF_EXISTS(vr.vifi)) {
1040 vr.icount=vif->pkt_in;
1041 vr.ocount=vif->pkt_out;
1042 vr.ibytes=vif->bytes_in;
1043 vr.obytes=vif->bytes_out;
1044 read_unlock(&mrt_lock);
1046 if (copy_to_user(arg,&vr,sizeof(vr)))
1050 read_unlock(&mrt_lock);
1051 return -EADDRNOTAVAIL;
1053 if (copy_from_user(&sr,arg,sizeof(sr)))
1056 read_lock(&mrt_lock);
1057 c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
1059 sr.pktcnt = c->mfc_un.res.pkt;
1060 sr.bytecnt = c->mfc_un.res.bytes;
1061 sr.wrong_if = c->mfc_un.res.wrong_if;
1062 read_unlock(&mrt_lock);
1064 if (copy_to_user(arg,&sr,sizeof(sr)))
1068 read_unlock(&mrt_lock);
1069 return -EADDRNOTAVAIL;
1071 return -ENOIOCTLCMD;
1076 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1078 struct vif_device *v;
1080 if (event != NETDEV_UNREGISTER)
1083 for (ct=0;ct<maxvif;ct++,v++) {
1091 static struct notifier_block ip_mr_notifier={
1092 .notifier_call = ipmr_device_event,
1096 * Encapsulate a packet by attaching a valid IPIP header to it.
1097 * This avoids tunnel drivers and other mess and gives us the speed so
1098 * important for multicast video.
1101 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1104 struct iphdr *old_iph = ip_hdr(skb);
1106 skb_push(skb, sizeof(struct iphdr));
1107 skb->transport_header = skb->network_header;
1108 skb_reset_network_header(skb);
1112 iph->tos = old_iph->tos;
1113 iph->ttl = old_iph->ttl;
1117 iph->protocol = IPPROTO_IPIP;
1119 iph->tot_len = htons(skb->len);
1120 ip_select_ident(iph, skb->dst, NULL);
1123 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1127 static inline int ipmr_forward_finish(struct sk_buff *skb)
1129 struct ip_options * opt = &(IPCB(skb)->opt);
1131 IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
1133 if (unlikely(opt->optlen))
1134 ip_forward_options(skb);
1136 return dst_output(skb);
1140 * Processing handlers for ipmr_forward
1143 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1145 const struct iphdr *iph = ip_hdr(skb);
1146 struct vif_device *vif = &vif_table[vifi];
1147 struct net_device *dev;
1151 if (vif->dev == NULL)
1154 #ifdef CONFIG_IP_PIMSM
1155 if (vif->flags & VIFF_REGISTER) {
1157 vif->bytes_out+=skb->len;
1158 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
1159 ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
1160 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
1166 if (vif->flags&VIFF_TUNNEL) {
1167 struct flowi fl = { .oif = vif->link,
1169 { .daddr = vif->remote,
1170 .saddr = vif->local,
1171 .tos = RT_TOS(iph->tos) } },
1172 .proto = IPPROTO_IPIP };
1173 if (ip_route_output_key(&rt, &fl))
1175 encap = sizeof(struct iphdr);
1177 struct flowi fl = { .oif = vif->link,
1179 { .daddr = iph->daddr,
1180 .tos = RT_TOS(iph->tos) } },
1181 .proto = IPPROTO_IPIP };
1182 if (ip_route_output_key(&rt, &fl))
1186 dev = rt->u.dst.dev;
1188 if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
1189 /* Do not fragment multicasts. Alas, IPv4 does not
1190 allow to send ICMP, so that packets will disappear
1194 IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
1199 encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
1201 if (skb_cow(skb, encap)) {
1207 vif->bytes_out+=skb->len;
1209 dst_release(skb->dst);
1210 skb->dst = &rt->u.dst;
1211 ip_decrease_ttl(ip_hdr(skb));
1213 /* FIXME: forward and output firewalls used to be called here.
1214 * What do we do with netfilter? -- RR */
1215 if (vif->flags & VIFF_TUNNEL) {
1216 ip_encap(skb, vif->local, vif->remote);
1217 /* FIXME: extra output firewall step used to be here. --RR */
1218 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
1219 ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
1222 IPCB(skb)->flags |= IPSKB_FORWARDED;
1225 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1226 * not only before forwarding, but after forwarding on all output
1227 * interfaces. It is clear, if mrouter runs a multicasting
1228 * program, it should receive packets not depending to what interface
1229 * program is joined.
1230 * If we will not make it, the program will have to join on all
1231 * interfaces. On the other hand, multihoming host (or router, but
1232 * not mrouter) cannot join to more than one interface - it will
1233 * result in receiving multiple packets.
1235 NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
1236 ipmr_forward_finish);
1244 static int ipmr_find_vif(struct net_device *dev)
1247 for (ct=maxvif-1; ct>=0; ct--) {
1248 if (vif_table[ct].dev == dev)
1254 /* "local" means that we should preserve one skb (for local delivery) */
1256 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
1261 vif = cache->mfc_parent;
1262 cache->mfc_un.res.pkt++;
1263 cache->mfc_un.res.bytes += skb->len;
1266 * Wrong interface: drop packet and (maybe) send PIM assert.
1268 if (vif_table[vif].dev != skb->dev) {
1271 if (((struct rtable*)skb->dst)->fl.iif == 0) {
1272 /* It is our own packet, looped back.
1273 Very complicated situation...
1275 The best workaround until routing daemons will be
1276 fixed is not to redistribute packet, if it was
1277 send through wrong interface. It means, that
1278 multicast applications WILL NOT work for
1279 (S,G), which have default multicast route pointing
1280 to wrong oif. In any case, it is not a good
1281 idea to use multicasting applications on router.
1286 cache->mfc_un.res.wrong_if++;
1287 true_vifi = ipmr_find_vif(skb->dev);
1289 if (true_vifi >= 0 && mroute_do_assert &&
1290 /* pimsm uses asserts, when switching from RPT to SPT,
1291 so that we cannot check that packet arrived on an oif.
1292 It is bad, but otherwise we would need to move pretty
1293 large chunk of pimd to kernel. Ough... --ANK
1295 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
1297 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1298 cache->mfc_un.res.last_assert = jiffies;
1299 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
1304 vif_table[vif].pkt_in++;
1305 vif_table[vif].bytes_in+=skb->len;
1310 for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
1311 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1313 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1315 ipmr_queue_xmit(skb2, cache, psend);
1322 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1324 ipmr_queue_xmit(skb2, cache, psend);
1326 ipmr_queue_xmit(skb, cache, psend);
1339 * Multicast packets for forwarding arrive here
1342 int ip_mr_input(struct sk_buff *skb)
1344 struct mfc_cache *cache;
1345 int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;
1347 /* Packet is looped back after forward, it should not be
1348 forwarded second time, but still can be delivered locally.
1350 if (IPCB(skb)->flags&IPSKB_FORWARDED)
1354 if (IPCB(skb)->opt.router_alert) {
1355 if (ip_call_ra_chain(skb))
1357 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
1358 /* IGMPv1 (and broken IGMPv2 implementations sort of
1359 Cisco IOS <= 11.2(8)) do not put router alert
1360 option to IGMP packets destined to routable
1361 groups. It is very bad, because it means
1362 that we can forward NO IGMP messages.
1364 read_lock(&mrt_lock);
1365 if (mroute_socket) {
1367 raw_rcv(mroute_socket, skb);
1368 read_unlock(&mrt_lock);
1371 read_unlock(&mrt_lock);
1375 read_lock(&mrt_lock);
1376 cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1379 * No usable cache entry
1385 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1386 ip_local_deliver(skb);
1388 read_unlock(&mrt_lock);
1394 vif = ipmr_find_vif(skb->dev);
1396 int err = ipmr_cache_unresolved(vif, skb);
1397 read_unlock(&mrt_lock);
1401 read_unlock(&mrt_lock);
1406 ip_mr_forward(skb, cache, local);
1408 read_unlock(&mrt_lock);
1411 return ip_local_deliver(skb);
1417 return ip_local_deliver(skb);
1422 #ifdef CONFIG_IP_PIMSM_V1
1424 * Handle IGMP messages of PIMv1
1427 int pim_rcv_v1(struct sk_buff * skb)
1429 struct igmphdr *pim;
1430 struct iphdr *encap;
1431 struct net_device *reg_dev = NULL;
1433 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1436 pim = igmp_hdr(skb);
1438 if (!mroute_do_pim ||
1439 skb->len < sizeof(*pim) + sizeof(*encap) ||
1440 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1443 encap = (struct iphdr *)(skb_transport_header(skb) +
1444 sizeof(struct igmphdr));
1447 a. packet is really destinted to a multicast group
1448 b. packet is not a NULL-REGISTER
1449 c. packet is not truncated
1451 if (!MULTICAST(encap->daddr) ||
1452 encap->tot_len == 0 ||
1453 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1456 read_lock(&mrt_lock);
1457 if (reg_vif_num >= 0)
1458 reg_dev = vif_table[reg_vif_num].dev;
1461 read_unlock(&mrt_lock);
1463 if (reg_dev == NULL)
1466 skb->mac_header = skb->network_header;
1467 skb_pull(skb, (u8*)encap - skb->data);
1468 skb_reset_network_header(skb);
1470 skb->protocol = htons(ETH_P_IP);
1472 skb->pkt_type = PACKET_HOST;
1473 dst_release(skb->dst);
1475 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
1476 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
1487 #ifdef CONFIG_IP_PIMSM_V2
1488 static int pim_rcv(struct sk_buff * skb)
1490 struct pimreghdr *pim;
1491 struct iphdr *encap;
1492 struct net_device *reg_dev = NULL;
1494 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1497 pim = (struct pimreghdr *)skb_transport_header(skb);
1498 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
1499 (pim->flags&PIM_NULL_REGISTER) ||
1500 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
1501 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1504 /* check if the inner packet is destined to mcast group */
1505 encap = (struct iphdr *)(skb_transport_header(skb) +
1506 sizeof(struct pimreghdr));
1507 if (!MULTICAST(encap->daddr) ||
1508 encap->tot_len == 0 ||
1509 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1512 read_lock(&mrt_lock);
1513 if (reg_vif_num >= 0)
1514 reg_dev = vif_table[reg_vif_num].dev;
1517 read_unlock(&mrt_lock);
1519 if (reg_dev == NULL)
1522 skb->mac_header = skb->network_header;
1523 skb_pull(skb, (u8*)encap - skb->data);
1524 skb_reset_network_header(skb);
1526 skb->protocol = htons(ETH_P_IP);
1528 skb->pkt_type = PACKET_HOST;
1529 dst_release(skb->dst);
1530 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
1531 ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
1544 ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1547 struct rtnexthop *nhp;
1548 struct net_device *dev = vif_table[c->mfc_parent].dev;
1549 u8 *b = skb_tail_pointer(skb);
1550 struct rtattr *mp_head;
1553 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1555 mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));
1557 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1558 if (c->mfc_un.res.ttls[ct] < 255) {
1559 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1560 goto rtattr_failure;
1561 nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1562 nhp->rtnh_flags = 0;
1563 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1564 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
1565 nhp->rtnh_len = sizeof(*nhp);
1568 mp_head->rta_type = RTA_MULTIPATH;
1569 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
1570 rtm->rtm_type = RTN_MULTICAST;
1578 int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1581 struct mfc_cache *cache;
1582 struct rtable *rt = (struct rtable*)skb->dst;
1584 read_lock(&mrt_lock);
1585 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1588 struct sk_buff *skb2;
1590 struct net_device *dev;
1594 read_unlock(&mrt_lock);
1599 if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
1600 read_unlock(&mrt_lock);
1603 skb2 = skb_clone(skb, GFP_ATOMIC);
1605 read_unlock(&mrt_lock);
1609 skb_push(skb2, sizeof(struct iphdr));
1610 skb_reset_network_header(skb2);
1612 iph->ihl = sizeof(struct iphdr) >> 2;
1613 iph->saddr = rt->rt_src;
1614 iph->daddr = rt->rt_dst;
1616 err = ipmr_cache_unresolved(vif, skb2);
1617 read_unlock(&mrt_lock);
1621 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1622 cache->mfc_flags |= MFC_NOTIFY;
1623 err = ipmr_fill_mroute(skb, cache, rtm);
1624 read_unlock(&mrt_lock);
1628 #ifdef CONFIG_PROC_FS
1630 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
1632 struct ipmr_vif_iter {
1636 static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1639 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
1640 if (!VIF_EXISTS(iter->ct))
1643 return &vif_table[iter->ct];
1648 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
1650 read_lock(&mrt_lock);
1651 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
1655 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1657 struct ipmr_vif_iter *iter = seq->private;
1660 if (v == SEQ_START_TOKEN)
1661 return ipmr_vif_seq_idx(iter, 0);
1663 while (++iter->ct < maxvif) {
1664 if (!VIF_EXISTS(iter->ct))
1666 return &vif_table[iter->ct];
1671 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
1673 read_unlock(&mrt_lock);
1676 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1678 if (v == SEQ_START_TOKEN) {
1680 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1682 const struct vif_device *vif = v;
1683 const char *name = vif->dev ? vif->dev->name : "none";
1686 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1688 name, vif->bytes_in, vif->pkt_in,
1689 vif->bytes_out, vif->pkt_out,
1690 vif->flags, vif->local, vif->remote);
1695 static const struct seq_operations ipmr_vif_seq_ops = {
1696 .start = ipmr_vif_seq_start,
1697 .next = ipmr_vif_seq_next,
1698 .stop = ipmr_vif_seq_stop,
1699 .show = ipmr_vif_seq_show,
1702 static int ipmr_vif_open(struct inode *inode, struct file *file)
1704 struct seq_file *seq;
1706 struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
1711 rc = seq_open(file, &ipmr_vif_seq_ops);
1716 seq = file->private_data;
1726 static const struct file_operations ipmr_vif_fops = {
1727 .owner = THIS_MODULE,
1728 .open = ipmr_vif_open,
1730 .llseek = seq_lseek,
1731 .release = seq_release_private,
1734 struct ipmr_mfc_iter {
1735 struct mfc_cache **cache;
1740 static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1742 struct mfc_cache *mfc;
1744 it->cache = mfc_cache_array;
1745 read_lock(&mrt_lock);
1746 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
1747 for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
1750 read_unlock(&mrt_lock);
1752 it->cache = &mfc_unres_queue;
1753 spin_lock_bh(&mfc_unres_lock);
1754 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
1757 spin_unlock_bh(&mfc_unres_lock);
1764 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1766 struct ipmr_mfc_iter *it = seq->private;
1769 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
1773 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1775 struct mfc_cache *mfc = v;
1776 struct ipmr_mfc_iter *it = seq->private;
1780 if (v == SEQ_START_TOKEN)
1781 return ipmr_mfc_seq_idx(seq->private, 0);
1786 if (it->cache == &mfc_unres_queue)
1789 BUG_ON(it->cache != mfc_cache_array);
1791 while (++it->ct < MFC_LINES) {
1792 mfc = mfc_cache_array[it->ct];
1797 /* exhausted cache_array, show unresolved */
1798 read_unlock(&mrt_lock);
1799 it->cache = &mfc_unres_queue;
1802 spin_lock_bh(&mfc_unres_lock);
1803 mfc = mfc_unres_queue;
1808 spin_unlock_bh(&mfc_unres_lock);
1814 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1816 struct ipmr_mfc_iter *it = seq->private;
1818 if (it->cache == &mfc_unres_queue)
1819 spin_unlock_bh(&mfc_unres_lock);
1820 else if (it->cache == mfc_cache_array)
1821 read_unlock(&mrt_lock);
1824 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1828 if (v == SEQ_START_TOKEN) {
1830 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
1832 const struct mfc_cache *mfc = v;
1833 const struct ipmr_mfc_iter *it = seq->private;
1835 seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
1836 (unsigned long) mfc->mfc_mcastgrp,
1837 (unsigned long) mfc->mfc_origin,
1839 mfc->mfc_un.res.pkt,
1840 mfc->mfc_un.res.bytes,
1841 mfc->mfc_un.res.wrong_if);
1843 if (it->cache != &mfc_unres_queue) {
1844 for (n = mfc->mfc_un.res.minvif;
1845 n < mfc->mfc_un.res.maxvif; n++ ) {
1847 && mfc->mfc_un.res.ttls[n] < 255)
1850 n, mfc->mfc_un.res.ttls[n]);
1853 seq_putc(seq, '\n');
1858 static const struct seq_operations ipmr_mfc_seq_ops = {
1859 .start = ipmr_mfc_seq_start,
1860 .next = ipmr_mfc_seq_next,
1861 .stop = ipmr_mfc_seq_stop,
1862 .show = ipmr_mfc_seq_show,
1865 static int ipmr_mfc_open(struct inode *inode, struct file *file)
1867 struct seq_file *seq;
1869 struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
1874 rc = seq_open(file, &ipmr_mfc_seq_ops);
1878 seq = file->private_data;
1888 static const struct file_operations ipmr_mfc_fops = {
1889 .owner = THIS_MODULE,
1890 .open = ipmr_mfc_open,
1892 .llseek = seq_lseek,
1893 .release = seq_release_private,
1897 #ifdef CONFIG_IP_PIMSM_V2
1898 static struct net_protocol pim_protocol = {
1905 * Setup for IP multicast routing
1908 void __init ip_mr_init(void)
1910 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1911 sizeof(struct mfc_cache),
1912 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1914 init_timer(&ipmr_expire_timer);
1915 ipmr_expire_timer.function=ipmr_expire_process;
1916 register_netdevice_notifier(&ip_mr_notifier);
1917 #ifdef CONFIG_PROC_FS
1918 proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
1919 proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);