2 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
18 * YOSHIFUJI Hideaki @USAGI
19 * reworked default router selection.
20 * - respect outgoing interface
21 * - select from (probably) reachable routers (i.e.
22 * routers in REACHABLE, STALE, DELAY or PROBE states).
23 * - always select the same router if it is (probably)
24 * reachable. otherwise, round-robin the list.
26 * Fixed routing subtrees.
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/times.h>
33 #include <linux/socket.h>
34 #include <linux/sockios.h>
35 #include <linux/net.h>
36 #include <linux/route.h>
37 #include <linux/netdevice.h>
38 #include <linux/in6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
49 #include <net/ip6_fib.h>
50 #include <net/ip6_route.h>
51 #include <net/ndisc.h>
52 #include <net/addrconf.h>
54 #include <linux/rtnetlink.h>
57 #include <net/netevent.h>
58 #include <net/netlink.h>
60 #include <asm/uaccess.h>
63 #include <linux/sysctl.h>
66 /* Set to 3 to get tracing. */
70 #define RDBG(x) printk x
71 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
74 #define RT6_TRACE(x...) do { ; } while (0)
77 #define CLONE_OFFLINK_ROUTE 0
79 static int ip6_rt_max_size = 4096;
80 static int ip6_rt_gc_min_interval = HZ / 2;
81 static int ip6_rt_gc_timeout = 60*HZ;
82 int ip6_rt_gc_interval = 30*HZ;
83 static int ip6_rt_gc_elasticity = 9;
84 static int ip6_rt_mtu_expires = 10*60*HZ;
85 static int ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
87 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
88 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
89 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
90 static void ip6_dst_destroy(struct dst_entry *);
91 static void ip6_dst_ifdown(struct dst_entry *,
92 struct net_device *dev, int how);
93 static int ip6_dst_gc(void);
95 static int ip6_pkt_discard(struct sk_buff *skb);
96 static int ip6_pkt_discard_out(struct sk_buff *skb);
97 static void ip6_link_failure(struct sk_buff *skb);
98 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
100 #ifdef CONFIG_IPV6_ROUTE_INFO
101 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
102 struct in6_addr *gwaddr, int ifindex,
104 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
105 struct in6_addr *gwaddr, int ifindex);
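/*
 *	ip6_dst_ops binds the generic dst cache to the IPv6 handlers declared
 *	above.  The static rt6_info templates below (ip6_null_entry and, with
 *	CONFIG_IPV6_MULTIPLE_TABLES, the prohibit and blackhole entries) are
 *	permanently referenced REJECT routes on loopback_dev with maximal
 *	metric; lookups fall back to them when nothing else matches.
 */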
108 static struct dst_ops ip6_dst_ops = {
110 .protocol = __constant_htons(ETH_P_IPV6),
113 .check = ip6_dst_check,
114 .destroy = ip6_dst_destroy,
115 .ifdown = ip6_dst_ifdown,
116 .negative_advice = ip6_negative_advice,
117 .link_failure = ip6_link_failure,
118 .update_pmtu = ip6_rt_update_pmtu,
119 .entry_size = sizeof(struct rt6_info),
122 struct rt6_info ip6_null_entry = {
125 .__refcnt = ATOMIC_INIT(1),
127 .dev = &loopback_dev,
129 .error = -ENETUNREACH,
130 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
131 .input = ip6_pkt_discard,
132 .output = ip6_pkt_discard_out,
134 .path = (struct dst_entry*)&ip6_null_entry,
137 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
138 .rt6i_metric = ~(u32) 0,
139 .rt6i_ref = ATOMIC_INIT(1),
142 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
144 static int ip6_pkt_prohibit(struct sk_buff *skb);
145 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
146 static int ip6_pkt_blk_hole(struct sk_buff *skb);
148 struct rt6_info ip6_prohibit_entry = {
151 .__refcnt = ATOMIC_INIT(1),
153 .dev = &loopback_dev,
156 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
157 .input = ip6_pkt_prohibit,
158 .output = ip6_pkt_prohibit_out,
160 .path = (struct dst_entry*)&ip6_prohibit_entry,
163 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
164 .rt6i_metric = ~(u32) 0,
165 .rt6i_ref = ATOMIC_INIT(1),
168 struct rt6_info ip6_blk_hole_entry = {
171 .__refcnt = ATOMIC_INIT(1),
173 .dev = &loopback_dev,
176 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
177 .input = ip6_pkt_blk_hole,
178 .output = ip6_pkt_blk_hole,
180 .path = (struct dst_entry*)&ip6_blk_hole_entry,
183 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
184 .rt6i_metric = ~(u32) 0,
185 .rt6i_ref = ATOMIC_INIT(1),
190 /* allocate dst with ip6_dst_ops */
191 static __inline__ struct rt6_info *ip6_dst_alloc(void)
193 return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
196 static void ip6_dst_destroy(struct dst_entry *dst)
198 struct rt6_info *rt = (struct rt6_info *)dst;
199 struct inet6_dev *idev = rt->rt6i_idev;
202 rt->rt6i_idev = NULL;
207 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
210 struct rt6_info *rt = (struct rt6_info *)dst;
211 struct inet6_dev *idev = rt->rt6i_idev;
213 if (dev != &loopback_dev && idev != NULL && idev->dev == dev) {
214 struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
215 if (loopback_idev != NULL) {
216 rt->rt6i_idev = loopback_idev;
222 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
224 return (rt->rt6i_flags & RTF_EXPIRES &&
225 time_after(jiffies, rt->rt6i_expires));
228 static inline int rt6_need_strict(struct in6_addr *daddr)
230 return (ipv6_addr_type(daddr) &
231 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
235 * Route lookup. Any table->tb6_lock is implied.
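 *
 *	rt6_device_match() walks the chain of routes hanging off one fib6 node
 *	and prefers the route whose device matches the requested output
 *	interface; loopback routes are matched through their rt6i_idev.  If an
 *	interface was requested and nothing matches under an interface-strict
 *	lookup, ip6_null_entry is returned rather than a wrong-interface route.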
238 static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
242 struct rt6_info *local = NULL;
243 struct rt6_info *sprt;
246 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
247 struct net_device *dev = sprt->rt6i_dev;
248 if (dev->ifindex == oif)
250 if (dev->flags & IFF_LOOPBACK) {
251 if (sprt->rt6i_idev == NULL ||
252 sprt->rt6i_idev->dev->ifindex != oif) {
255 if (local && (!oif ||
256 local->rt6i_idev->dev->ifindex == oif))
267 return &ip6_null_entry;
272 #ifdef CONFIG_IPV6_ROUTER_PREF
273 static void rt6_probe(struct rt6_info *rt)
275 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
277 * Okay, this does not seem to be appropriate
278 * for now, however, we need to check if it
279 * is really so; aka Router Reachability Probing.
281 * Router Reachability Probe MUST be rate-limited
282 * to no more than one per minute.
284 if (!neigh || (neigh->nud_state & NUD_VALID))
286 read_lock_bh(&neigh->lock);
287 if (!(neigh->nud_state & NUD_VALID) &&
288 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
289 struct in6_addr mcaddr;
290 struct in6_addr *target;
292 neigh->updated = jiffies;
293 read_unlock_bh(&neigh->lock);
295 target = (struct in6_addr *)&neigh->primary_key;
296 addrconf_addr_solict_mult(target, &mcaddr);
297 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
299 read_unlock_bh(&neigh->lock);
302 static inline void rt6_probe(struct rt6_info *rt)
309 * Default Router Selection (RFC 2461 6.3.6)
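 *
 *	rt6_check_dev() scores how well a route's device matches the requested
 *	output interface and rt6_check_neigh() scores nexthop reachability from
 *	the neighbour cache; rt6_score_route() combines the two (ORing in the
 *	decoded router preference bits when CONFIG_IPV6_ROUTER_PREF is set) and
 *	fails routes that violate an interface-strict or reachability-strict
 *	lookup.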
311 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
313 struct net_device *dev = rt->rt6i_dev;
318 if (dev->flags & IFF_LOOPBACK) {
319 if (!WARN_ON(rt->rt6i_idev == NULL) &&
320 rt->rt6i_idev->dev->ifindex == oif)
325 if (dev->ifindex == oif)
331 static inline int rt6_check_neigh(struct rt6_info *rt)
333 struct neighbour *neigh = rt->rt6i_nexthop;
335 if (rt->rt6i_flags & RTF_NONEXTHOP ||
336 !(rt->rt6i_flags & RTF_GATEWAY))
339 read_lock_bh(&neigh->lock);
340 if (neigh->nud_state & NUD_VALID)
342 else if (!(neigh->nud_state & NUD_FAILED))
344 read_unlock_bh(&neigh->lock);
349 static int rt6_score_route(struct rt6_info *rt, int oif,
354 m = rt6_check_dev(rt, oif);
355 if (!m && (strict & RT6_LOOKUP_F_IFACE))
357 #ifdef CONFIG_IPV6_ROUTER_PREF
358 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
360 n = rt6_check_neigh(rt);
361 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
366 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
367 int *mpri, struct rt6_info *match)
371 if (rt6_check_expired(rt))
374 m = rt6_score_route(rt, oif, strict);
379 if (strict & RT6_LOOKUP_F_REACHABLE)
383 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
391 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
392 struct rt6_info *rr_head,
393 u32 metric, int oif, int strict)
395 struct rt6_info *rt, *match;
399 for (rt = rr_head; rt && rt->rt6i_metric == metric;
400 rt = rt->u.dst.rt6_next)
401 match = find_match(rt, oif, strict, &mpri, match);
402 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
403 rt = rt->u.dst.rt6_next)
404 match = find_match(rt, oif, strict, &mpri, match);
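/*
 *	rt6_select() implements default router round-robin: starting at
 *	fn->rr_ptr (or fn->leaf), find_rr_leaf() scores every route with the
 *	same metric and prefers one through a probably reachable router; when a
 *	reachability-strict lookup finds nothing, rr_ptr is advanced to the
 *	next route of the same metric so later lookups try another router.
 */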
409 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
411 struct rt6_info *match, *rt0;
413 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
414 __FUNCTION__, fn->leaf, oif);
418 fn->rr_ptr = rt0 = fn->leaf;
420 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
423 (strict & RT6_LOOKUP_F_REACHABLE)) {
424 struct rt6_info *next = rt0->u.dst.rt6_next;
426 /* no entries matched; do round-robin */
427 if (!next || next->rt6i_metric != rt0->rt6i_metric)
434 RT6_TRACE("%s() => %p\n",
435 __FUNCTION__, match);
437 return (match ? match : &ip6_null_entry);
440 #ifdef CONFIG_IPV6_ROUTE_INFO
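/*
 *	rt6_route_rcv() handles the Route Information option (RFC 4191) carried
 *	in Router Advertisements: it sanity-checks the option length against the
 *	advertised prefix length, normalizes an invalid preference to medium,
 *	caps the lifetime to avoid jiffies overflow, and then adds, refreshes or
 *	(on a zero lifetime) removes the corresponding RTF_ROUTEINFO route via
 *	the announcing router.
 */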
441 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
442 struct in6_addr *gwaddr)
444 struct route_info *rinfo = (struct route_info *) opt;
445 struct in6_addr prefix_buf, *prefix;
450 if (len < sizeof(struct route_info)) {
454 /* Sanity check for prefix_len and length */
455 if (rinfo->length > 3) {
457 } else if (rinfo->prefix_len > 128) {
459 } else if (rinfo->prefix_len > 64) {
460 if (rinfo->length < 2) {
463 } else if (rinfo->prefix_len > 0) {
464 if (rinfo->length < 1) {
469 pref = rinfo->route_pref;
470 if (pref == ICMPV6_ROUTER_PREF_INVALID)
471 pref = ICMPV6_ROUTER_PREF_MEDIUM;
473 lifetime = ntohl(rinfo->lifetime);
474 if (lifetime == 0xffffffff) {
476 } else if (lifetime > 0x7fffffff/HZ) {
477 /* Avoid arithmetic overflow */
478 lifetime = 0x7fffffff/HZ - 1;
481 if (rinfo->length == 3)
482 prefix = (struct in6_addr *)rinfo->prefix;
484 /* this function is safe */
485 ipv6_addr_prefix(&prefix_buf,
486 (struct in6_addr *)rinfo->prefix,
488 prefix = &prefix_buf;
491 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex);
493 if (rt && !lifetime) {
499 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
502 rt->rt6i_flags = RTF_ROUTEINFO |
503 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
506 if (lifetime == 0xffffffff) {
507 rt->rt6i_flags &= ~RTF_EXPIRES;
509 rt->rt6i_expires = jiffies + HZ * lifetime;
510 rt->rt6i_flags |= RTF_EXPIRES;
512 dst_release(&rt->u.dst);
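/*
 *	BACKTRACK() is shared by the lookup routines below: when the current
 *	node yielded only ip6_null_entry it walks back up the trie, descending
 *	into a parent's source-address subtree (FIB6_SUBTREE) where one exists,
 *	until it reaches a node that actually carries routes (RTN_RTINFO) and
 *	restarts the match there, or gives up at the tree root.
 */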
518 #define BACKTRACK(saddr) \
520 if (rt == &ip6_null_entry) { \
521 struct fib6_node *pn; \
523 if (fn->fn_flags & RTN_TL_ROOT) \
526 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
527 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
530 if (fn->fn_flags & RTN_RTINFO) \
536 static struct rt6_info *ip6_pol_route_lookup(struct fib6_table *table,
537 struct flowi *fl, int flags)
539 struct fib6_node *fn;
542 read_lock_bh(&table->tb6_lock);
543 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
546 rt = rt6_device_match(rt, fl->oif, flags);
547 BACKTRACK(&fl->fl6_src);
549 dst_hold(&rt->u.dst);
550 read_unlock_bh(&table->tb6_lock);
552 rt->u.dst.lastuse = jiffies;
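/*
 *	rt6_lookup() is the plain, reference-taking lookup used elsewhere in
 *	the stack: it builds a flowi from the addresses and interface and
 *	resolves it through fib6_rule_lookup() with the non-cloning
 *	ip6_pol_route_lookup(); strict != 0 requests an exact interface match.
 */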
559 struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
570 struct dst_entry *dst;
571 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
574 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
575 flags |= RT6_LOOKUP_F_HAS_SADDR;
578 dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup);
580 return (struct rt6_info *) dst;
587 /* ip6_ins_rt is called with table->tb6_lock NOT held (free).
588 It takes a new route entry; if the addition fails for any reason, the
589 route is freed. In any case, if the caller does not hold a reference, the route may be destroyed.
593 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
596 struct fib6_table *table;
598 table = rt->rt6i_table;
599 write_lock_bh(&table->tb6_lock);
600 err = fib6_add(&table->tb6_root, rt, info);
601 write_unlock_bh(&table->tb6_lock);
606 int ip6_ins_rt(struct rt6_info *rt)
608 return __ip6_ins_rt(rt, NULL);
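/*
 *	rt6_alloc_cow() makes a per-destination RTF_CACHE /128 copy of a route
 *	and binds a neighbour entry for its nexthop; for non-gateway routes the
 *	destination itself becomes the gateway, and RTF_ANYCAST is set when it
 *	equals the route's prefix (subnet-router anycast) address.
 *	rt6_alloc_clone() makes the same host copy but reuses the parent
 *	route's neighbour.
 */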
611 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
612 struct in6_addr *saddr)
620 rt = ip6_rt_copy(ort);
623 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
624 if (rt->rt6i_dst.plen != 128 &&
625 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
626 rt->rt6i_flags |= RTF_ANYCAST;
627 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
630 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
631 rt->rt6i_dst.plen = 128;
632 rt->rt6i_flags |= RTF_CACHE;
633 rt->u.dst.flags |= DST_HOST;
635 #ifdef CONFIG_IPV6_SUBTREES
636 if (rt->rt6i_src.plen && saddr) {
637 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
638 rt->rt6i_src.plen = 128;
642 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
649 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
651 struct rt6_info *rt = ip6_rt_copy(ort);
653 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
654 rt->rt6i_dst.plen = 128;
655 rt->rt6i_flags |= RTF_CACHE;
656 rt->u.dst.flags |= DST_HOST;
657 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
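/*
 *	ip6_pol_route_input() does the per-table work of the input lookup.
 *	Hosts (forwarding disabled) ask for a probably reachable router via
 *	RT6_LOOKUP_F_REACHABLE.  A route that is already a cached clone (or the
 *	null entry) is returned as is; otherwise a per-destination copy is made
 *	(COW for routes without a bound nexthop, a plain hold for off-link
 *	routes while CLONE_OFFLINK_ROUTE is 0) and inserted, redoing the lookup
 *	if the insertion races with another CPU.
 */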
662 static struct rt6_info *ip6_pol_route_input(struct fib6_table *table,
663 struct flowi *fl, int flags)
665 struct fib6_node *fn;
666 struct rt6_info *rt, *nrt;
670 int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
672 strict |= flags & RT6_LOOKUP_F_IFACE;
675 read_lock_bh(&table->tb6_lock);
678 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
681 rt = rt6_select(fn, fl->iif, strict | reachable);
682 BACKTRACK(&fl->fl6_src);
683 if (rt == &ip6_null_entry ||
684 rt->rt6i_flags & RTF_CACHE)
687 dst_hold(&rt->u.dst);
688 read_unlock_bh(&table->tb6_lock);
690 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
691 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
693 #if CLONE_OFFLINK_ROUTE
694 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
700 dst_release(&rt->u.dst);
701 rt = nrt ? : &ip6_null_entry;
703 dst_hold(&rt->u.dst);
705 err = ip6_ins_rt(nrt);
714 * Race condition! In the gap, when table->tb6_lock was
715 * released someone could insert this route. Relookup.
717 dst_release(&rt->u.dst);
725 dst_hold(&rt->u.dst);
726 read_unlock_bh(&table->tb6_lock);
728 rt->u.dst.lastuse = jiffies;
734 void ip6_route_input(struct sk_buff *skb)
736 struct ipv6hdr *iph = skb->nh.ipv6h;
737 int flags = RT6_LOOKUP_F_HAS_SADDR;
739 .iif = skb->dev->ifindex,
744 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
748 .proto = iph->nexthdr,
751 if (rt6_need_strict(&iph->daddr))
752 flags |= RT6_LOOKUP_F_IFACE;
754 skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input);
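/*
 *	ip6_pol_route_output() below is the output-path twin of
 *	ip6_pol_route_input(); the only material difference is that routes are
 *	selected against fl->oif instead of fl->iif.
 */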
757 static struct rt6_info *ip6_pol_route_output(struct fib6_table *table,
758 struct flowi *fl, int flags)
760 struct fib6_node *fn;
761 struct rt6_info *rt, *nrt;
765 int reachable = ipv6_devconf.forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
767 strict |= flags & RT6_LOOKUP_F_IFACE;
770 read_lock_bh(&table->tb6_lock);
773 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
776 rt = rt6_select(fn, fl->oif, strict | reachable);
777 BACKTRACK(&fl->fl6_src);
778 if (rt == &ip6_null_entry ||
779 rt->rt6i_flags & RTF_CACHE)
782 dst_hold(&rt->u.dst);
783 read_unlock_bh(&table->tb6_lock);
785 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
786 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
788 #if CLONE_OFFLINK_ROUTE
789 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
795 dst_release(&rt->u.dst);
796 rt = nrt ? : &ip6_null_entry;
798 dst_hold(&rt->u.dst);
800 err = ip6_ins_rt(nrt);
809 * Race condition! In the gap, when table->tb6_lock was
810 * released someone could insert this route. Relookup.
812 dst_release(&rt->u.dst);
820 dst_hold(&rt->u.dst);
821 read_unlock_bh(&table->tb6_lock);
823 rt->u.dst.lastuse = jiffies;
828 struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
832 if (rt6_need_strict(&fl->fl6_dst))
833 flags |= RT6_LOOKUP_F_IFACE;
835 if (!ipv6_addr_any(&fl->fl6_src))
836 flags |= RT6_LOOKUP_F_HAS_SADDR;
838 return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
843 * Destination cache support functions
846 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
850 rt = (struct rt6_info *) dst;
852 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
858 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
860 struct rt6_info *rt = (struct rt6_info *) dst;
863 if (rt->rt6i_flags & RTF_CACHE)
871 static void ip6_link_failure(struct sk_buff *skb)
875 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
877 rt = (struct rt6_info *) skb->dst;
879 if (rt->rt6i_flags&RTF_CACHE) {
880 dst_set_expires(&rt->u.dst, 0);
881 rt->rt6i_flags |= RTF_EXPIRES;
882 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
883 rt->rt6i_node->fn_sernum = -1;
887 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
889 struct rt6_info *rt6 = (struct rt6_info*)dst;
891 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
892 rt6->rt6i_flags |= RTF_MODIFIED;
893 if (mtu < IPV6_MIN_MTU) {
895 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
897 dst->metrics[RTAX_MTU-1] = mtu;
898 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
902 static int ipv6_get_mtu(struct net_device *dev);
904 static inline unsigned int ipv6_advmss(unsigned int mtu)
906 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
908 if (mtu < ip6_rt_min_advmss)
909 mtu = ip6_rt_min_advmss;
912 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
913 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
914 * IPV6_MAXPLEN is also valid and means: "any MSS,
915 * rely only on pmtu discovery"
917 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
922 static struct dst_entry *ndisc_dst_gc_list;
923 static DEFINE_SPINLOCK(ndisc_lock);
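/*
 *	ndisc_dst_alloc() builds a standalone host dst for neighbour discovery
 *	traffic: hop limit 255, MTU/advmss taken from the device, and a bound
 *	neighbour entry (looked up here when the caller passes none).  These
 *	dsts are not inserted into the FIB; they are chained on
 *	ndisc_dst_gc_list and reaped by ndisc_dst_gc() once their refcount
 *	drops to zero, and fib6 garbage collection is kicked so that reaping
 *	eventually happens.
 */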
925 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
926 struct neighbour *neigh,
927 struct in6_addr *addr,
928 int (*output)(struct sk_buff *))
931 struct inet6_dev *idev = in6_dev_get(dev);
933 if (unlikely(idev == NULL))
936 rt = ip6_dst_alloc();
937 if (unlikely(rt == NULL)) {
946 neigh = ndisc_get_neigh(dev, addr);
949 rt->rt6i_idev = idev;
950 rt->rt6i_nexthop = neigh;
951 atomic_set(&rt->u.dst.__refcnt, 1);
952 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
953 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
954 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
955 rt->u.dst.output = output;
957 #if 0 /* there's no chance to use these for ndisc */
958 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
961 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
962 rt->rt6i_dst.plen = 128;
965 spin_lock_bh(&ndisc_lock);
966 rt->u.dst.next = ndisc_dst_gc_list;
967 ndisc_dst_gc_list = &rt->u.dst;
968 spin_unlock_bh(&ndisc_lock);
970 fib6_force_start_gc();
976 int ndisc_dst_gc(int *more)
978 struct dst_entry *dst, *next, **pprev;
984 spin_lock_bh(&ndisc_lock);
985 pprev = &ndisc_dst_gc_list;
987 while ((dst = *pprev) != NULL) {
988 if (!atomic_read(&dst->__refcnt)) {
998 spin_unlock_bh(&ndisc_lock);
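/*
 *	ip6_dst_gc() is the dst-cache garbage collector: a pass is skipped if
 *	one ran within gc_min_interval and the cache is still under max_size;
 *	otherwise fib6_run_gc() is run with an expiry window that is reset to
 *	half of gc_timeout once the cache drops below gc_thresh and is shaved
 *	by expire>>gc_elasticity on each call, so pressure rises the longer the
 *	cache stays full.
 */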
1003 static int ip6_dst_gc(void)
1005 static unsigned expire = 30*HZ;
1006 static unsigned long last_gc;
1007 unsigned long now = jiffies;
1009 if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
1010 atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
1014 fib6_run_gc(expire);
1016 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh)
1017 expire = ip6_rt_gc_timeout>>1;
1020 expire -= expire>>ip6_rt_gc_elasticity;
1021 return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
1024 /* Clean host part of a prefix. Not necessary in radix tree,
1025 but results in cleaner routing tables.
1027 Remove it only when all the things will work!
1030 static int ipv6_get_mtu(struct net_device *dev)
1032 int mtu = IPV6_MIN_MTU;
1033 struct inet6_dev *idev;
1035 idev = in6_dev_get(dev);
1037 mtu = idev->cnf.mtu6;
1043 int ipv6_get_hoplimit(struct net_device *dev)
1045 int hoplimit = ipv6_devconf.hop_limit;
1046 struct inet6_dev *idev;
1048 idev = in6_dev_get(dev);
1050 hoplimit = idev->cnf.hop_limit;
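/*
 *	ip6_route_add() turns a fib6_config into an rt6_info: it validates the
 *	prefix lengths (source routes only with CONFIG_IPV6_SUBTREES), defaults
 *	the metric to IP6_RT_PRIO_USER, promotes routes via a loopback device
 *	to reject routes, and for RTF_GATEWAY routes requires a link-local
 *	unicast nexthop unless a direct (non-gateway) route to the gateway
 *	already exists, whose device is then adopted.  Metrics come from the
 *	RTA_METRICS nest, with defaults for hop limit, MTU and advmss, and the
 *	finished route is inserted with __ip6_ins_rt().
 */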
1060 int ip6_route_add(struct fib6_config *cfg)
1063 struct rt6_info *rt = NULL;
1064 struct net_device *dev = NULL;
1065 struct inet6_dev *idev = NULL;
1066 struct fib6_table *table;
1069 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1071 #ifndef CONFIG_IPV6_SUBTREES
1072 if (cfg->fc_src_len)
1075 if (cfg->fc_ifindex) {
1077 dev = dev_get_by_index(cfg->fc_ifindex);
1080 idev = in6_dev_get(dev);
1085 if (cfg->fc_metric == 0)
1086 cfg->fc_metric = IP6_RT_PRIO_USER;
1088 table = fib6_new_table(cfg->fc_table);
1089 if (table == NULL) {
1094 rt = ip6_dst_alloc();
1101 rt->u.dst.obsolete = -1;
1102 rt->rt6i_expires = jiffies + clock_t_to_jiffies(cfg->fc_expires);
1104 if (cfg->fc_protocol == RTPROT_UNSPEC)
1105 cfg->fc_protocol = RTPROT_BOOT;
1106 rt->rt6i_protocol = cfg->fc_protocol;
1108 addr_type = ipv6_addr_type(&cfg->fc_dst);
1110 if (addr_type & IPV6_ADDR_MULTICAST)
1111 rt->u.dst.input = ip6_mc_input;
1113 rt->u.dst.input = ip6_forward;
1115 rt->u.dst.output = ip6_output;
1117 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1118 rt->rt6i_dst.plen = cfg->fc_dst_len;
1119 if (rt->rt6i_dst.plen == 128)
1120 rt->u.dst.flags = DST_HOST;
1122 #ifdef CONFIG_IPV6_SUBTREES
1123 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1124 rt->rt6i_src.plen = cfg->fc_src_len;
1127 rt->rt6i_metric = cfg->fc_metric;
1129 /* We cannot add true routes via loopback here,
1130 they would result in kernel looping; promote them to reject routes
1132 if ((cfg->fc_flags & RTF_REJECT) ||
1133 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1134 /* hold loopback dev/idev if we haven't done so. */
1135 if (dev != &loopback_dev) {
1140 dev = &loopback_dev;
1142 idev = in6_dev_get(dev);
1148 rt->u.dst.output = ip6_pkt_discard_out;
1149 rt->u.dst.input = ip6_pkt_discard;
1150 rt->u.dst.error = -ENETUNREACH;
1151 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1155 if (cfg->fc_flags & RTF_GATEWAY) {
1156 struct in6_addr *gw_addr;
1159 gw_addr = &cfg->fc_gateway;
1160 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1161 gwa_type = ipv6_addr_type(gw_addr);
1163 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1164 struct rt6_info *grt;
1166 /* IPv6 strictly prohibits using non-link-local
1167 addresses as the nexthop address.
1168 Otherwise, the router will not be able to send redirects.
1169 That is very good, but in some (rare!) circumstances
1170 (SIT, PtP, NBMA NOARP links) it is handy to allow
1171 some exceptions. --ANK
1174 if (!(gwa_type&IPV6_ADDR_UNICAST))
1177 grt = rt6_lookup(gw_addr, NULL, cfg->fc_ifindex, 1);
1179 err = -EHOSTUNREACH;
1183 if (dev != grt->rt6i_dev) {
1184 dst_release(&grt->u.dst);
1188 dev = grt->rt6i_dev;
1189 idev = grt->rt6i_idev;
1191 in6_dev_hold(grt->rt6i_idev);
1193 if (!(grt->rt6i_flags&RTF_GATEWAY))
1195 dst_release(&grt->u.dst);
1201 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1209 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1210 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1211 if (IS_ERR(rt->rt6i_nexthop)) {
1212 err = PTR_ERR(rt->rt6i_nexthop);
1213 rt->rt6i_nexthop = NULL;
1218 rt->rt6i_flags = cfg->fc_flags;
1225 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1226 int type = nla->nla_type;
1229 if (type > RTAX_MAX) {
1234 rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
1239 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1240 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1241 if (!rt->u.dst.metrics[RTAX_MTU-1])
1242 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1243 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1244 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1245 rt->u.dst.dev = dev;
1246 rt->rt6i_idev = idev;
1247 rt->rt6i_table = table;
1248 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1256 dst_free(&rt->u.dst);
1260 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1263 struct fib6_table *table;
1265 if (rt == &ip6_null_entry)
1268 table = rt->rt6i_table;
1269 write_lock_bh(&table->tb6_lock);
1271 err = fib6_del(rt, info);
1272 dst_release(&rt->u.dst);
1274 write_unlock_bh(&table->tb6_lock);
1279 int ip6_del_rt(struct rt6_info *rt)
1281 return __ip6_del_rt(rt, NULL);
1284 static int ip6_route_del(struct fib6_config *cfg)
1286 struct fib6_table *table;
1287 struct fib6_node *fn;
1288 struct rt6_info *rt;
1291 table = fib6_get_table(cfg->fc_table);
1295 read_lock_bh(&table->tb6_lock);
1297 fn = fib6_locate(&table->tb6_root,
1298 &cfg->fc_dst, cfg->fc_dst_len,
1299 &cfg->fc_src, cfg->fc_src_len);
1302 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1303 if (cfg->fc_ifindex &&
1304 (rt->rt6i_dev == NULL ||
1305 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1307 if (cfg->fc_flags & RTF_GATEWAY &&
1308 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1310 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1312 dst_hold(&rt->u.dst);
1313 read_unlock_bh(&table->tb6_lock);
1315 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1318 read_unlock_bh(&table->tb6_lock);
1326 struct ip6rd_flowi {
1328 struct in6_addr gateway;
1331 static struct rt6_info *__ip6_route_redirect(struct fib6_table *table,
1335 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1336 struct rt6_info *rt;
1337 struct fib6_node *fn;
1340 * Get the "current" route for this destination and
1341 * check if the redirect has come from the appropriate router.
1343 * RFC 2461 specifies that redirects should only be
1344 * accepted if they come from the nexthop to the target.
1345 * Due to the way the routes are chosen, this notion
1346 * is a bit fuzzy and one might need to check all possible
1350 read_lock_bh(&table->tb6_lock);
1351 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1353 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1355 * Current route is on-link; redirect is always invalid.
1357 * It seems the previous statement is not true. It could
1358 * be a node which regards us as on-link (e.g. proxy ndisc),
1359 * but then the router serving it might decide that we should
1360 * know the truth 8)8) --ANK (980726).
1362 if (rt6_check_expired(rt))
1364 if (!(rt->rt6i_flags & RTF_GATEWAY))
1366 if (fl->oif != rt->rt6i_dev->ifindex)
1368 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1374 rt = &ip6_null_entry;
1375 BACKTRACK(&fl->fl6_src);
1377 dst_hold(&rt->u.dst);
1379 read_unlock_bh(&table->tb6_lock);
1384 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1385 struct in6_addr *src,
1386 struct in6_addr *gateway,
1387 struct net_device *dev)
1389 int flags = RT6_LOOKUP_F_HAS_SADDR;
1390 struct ip6rd_flowi rdfl = {
1392 .oif = dev->ifindex,
1400 .gateway = *gateway,
1403 if (rt6_need_strict(dest))
1404 flags |= RT6_LOOKUP_F_IFACE;
1406 return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect);
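/*
 *	rt6_redirect() first verifies via __ip6_route_redirect() that the
 *	redirect came from the current nexthop for the destination, updates the
 *	neighbour cache from the link-layer address option, then installs an
 *	RTF_DYNAMIC|RTF_CACHE host route through the new nexthop, raises a
 *	NETEVENT_REDIRECT notification, and drops the old RTF_CACHE entry if
 *	there was one.
 */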
1409 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1410 struct in6_addr *saddr,
1411 struct neighbour *neigh, u8 *lladdr, int on_link)
1413 struct rt6_info *rt, *nrt = NULL;
1414 struct netevent_redirect netevent;
1416 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1418 if (rt == &ip6_null_entry) {
1419 if (net_ratelimit())
1420 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1421 "for redirect target\n");
1426 * We have finally decided to accept it.
1429 neigh_update(neigh, lladdr, NUD_STALE,
1430 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1431 NEIGH_UPDATE_F_OVERRIDE|
1432 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1433 NEIGH_UPDATE_F_ISROUTER))
1437 * Redirect received -> path was valid.
1438 * Look, redirects are sent only in response to data packets,
1439 * so this nexthop is apparently reachable. --ANK
1441 dst_confirm(&rt->u.dst);
1443 /* Duplicate redirect: silently ignore. */
1444 if (neigh == rt->u.dst.neighbour)
1447 nrt = ip6_rt_copy(rt);
1451 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1453 nrt->rt6i_flags &= ~RTF_GATEWAY;
1455 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1456 nrt->rt6i_dst.plen = 128;
1457 nrt->u.dst.flags |= DST_HOST;
1459 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1460 nrt->rt6i_nexthop = neigh_clone(neigh);
1461 /* Reset pmtu, it may be better */
1462 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1463 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));
1465 if (ip6_ins_rt(nrt))
1468 netevent.old = &rt->u.dst;
1469 netevent.new = &nrt->u.dst;
1470 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1472 if (rt->rt6i_flags&RTF_CACHE) {
1478 dst_release(&rt->u.dst);
1483 * Handle ICMP "packet too big" messages
1484 * i.e. Path MTU discovery
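 *
 *	rt6_pmtu_discovery() ignores reports that do not shrink the current
 *	PMTU, clamps values below IPV6_MIN_MTU to 1280 while setting
 *	RTAX_FEATURE_ALLFRAG, updates RTF_CACHE host routes in place and
 *	otherwise records the new value in an expiring clone, so the old PMTU
 *	comes back once the clone times out.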
1487 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1488 struct net_device *dev, u32 pmtu)
1490 struct rt6_info *rt, *nrt;
1493 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0);
1497 if (pmtu >= dst_mtu(&rt->u.dst))
1500 if (pmtu < IPV6_MIN_MTU) {
1502 * According to RFC 2460, the PMTU is set to the IPv6 Minimum Link
1503 * MTU (1280) and a fragment header should always be included
1504 * after a node receives a Packet Too Big message reporting a PMTU
1505 * less than the IPv6 Minimum Link MTU.
1507 pmtu = IPV6_MIN_MTU;
1511 /* New mtu received -> path was valid.
1512 Packet Too Big messages are sent only in response to data packets,
1513 so this nexthop is apparently reachable. --ANK
1515 dst_confirm(&rt->u.dst);
1517 /* Host route. If it is static, it would be better
1518 not to override it but to add a new one, so that
1519 when the cache entry expires, the old pmtu
1520 is restored automatically.
1522 if (rt->rt6i_flags & RTF_CACHE) {
1523 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1525 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1526 dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
1527 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1532 Two cases are possible:
1533 1. It is a connected route. Action: COW.
1534 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1536 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1537 nrt = rt6_alloc_cow(rt, daddr, saddr);
1539 nrt = rt6_alloc_clone(rt, daddr);
1542 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1544 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1546 /* According to RFC 1981, a PMTU increase should not be detected
1547 * within 5 minutes; the recommended timer is 10 minutes.
1548 * Here the route expiration time is set to ip6_rt_mtu_expires,
1549 * which is 10 minutes. After 10 minutes the decreased pmtu expires
1550 * and a PMTU increase can again be detected automatically.
1552 dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
1553 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1558 dst_release(&rt->u.dst);
1562 * Misc support functions
1565 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1567 struct rt6_info *rt = ip6_dst_alloc();
1570 rt->u.dst.input = ort->u.dst.input;
1571 rt->u.dst.output = ort->u.dst.output;
1573 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1574 rt->u.dst.error = ort->u.dst.error;
1575 rt->u.dst.dev = ort->u.dst.dev;
1577 dev_hold(rt->u.dst.dev);
1578 rt->rt6i_idev = ort->rt6i_idev;
1580 in6_dev_hold(rt->rt6i_idev);
1581 rt->u.dst.lastuse = jiffies;
1582 rt->rt6i_expires = 0;
1584 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1585 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1586 rt->rt6i_metric = 0;
1588 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1589 #ifdef CONFIG_IPV6_SUBTREES
1590 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1592 rt->rt6i_table = ort->rt6i_table;
1597 #ifdef CONFIG_IPV6_ROUTE_INFO
1598 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
1599 struct in6_addr *gwaddr, int ifindex)
1601 struct fib6_node *fn;
1602 struct rt6_info *rt = NULL;
1603 struct fib6_table *table;
1605 table = fib6_get_table(RT6_TABLE_INFO);
1609 write_lock_bh(&table->tb6_lock);
1610 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1614 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1615 if (rt->rt6i_dev->ifindex != ifindex)
1617 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1619 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1621 dst_hold(&rt->u.dst);
1625 write_unlock_bh(&table->tb6_lock);
1629 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
1630 struct in6_addr *gwaddr, int ifindex,
1633 struct fib6_config cfg = {
1634 .fc_table = RT6_TABLE_INFO,
1636 .fc_ifindex = ifindex,
1637 .fc_dst_len = prefixlen,
1638 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1639 RTF_UP | RTF_PREF(pref),
1642 ipv6_addr_copy(&cfg.fc_dst, prefix);
1643 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1645 /* We should treat it as a default route if prefix length is 0. */
1647 cfg.fc_flags |= RTF_DEFAULT;
1649 ip6_route_add(&cfg);
1651 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex);
1655 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1657 struct rt6_info *rt;
1658 struct fib6_table *table;
1660 table = fib6_get_table(RT6_TABLE_DFLT);
1664 write_lock_bh(&table->tb6_lock);
1665 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
1666 if (dev == rt->rt6i_dev &&
1667 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1668 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1672 dst_hold(&rt->u.dst);
1673 write_unlock_bh(&table->tb6_lock);
1677 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1678 struct net_device *dev,
1681 struct fib6_config cfg = {
1682 .fc_table = RT6_TABLE_DFLT,
1684 .fc_ifindex = dev->ifindex,
1685 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1686 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1689 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1691 ip6_route_add(&cfg);
1693 return rt6_get_dflt_router(gwaddr, dev);
1696 void rt6_purge_dflt_routers(void)
1698 struct rt6_info *rt;
1699 struct fib6_table *table;
1701 /* NOTE: Keep consistent with rt6_get_dflt_router */
1702 table = fib6_get_table(RT6_TABLE_DFLT);
1707 read_lock_bh(&table->tb6_lock);
1708 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
1709 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1710 dst_hold(&rt->u.dst);
1711 read_unlock_bh(&table->tb6_lock);
1716 read_unlock_bh(&table->tb6_lock);
1719 static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg,
1720 struct fib6_config *cfg)
1722 memset(cfg, 0, sizeof(*cfg));
1724 cfg->fc_table = RT6_TABLE_MAIN;
1725 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1726 cfg->fc_metric = rtmsg->rtmsg_metric;
1727 cfg->fc_expires = rtmsg->rtmsg_info;
1728 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1729 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1730 cfg->fc_flags = rtmsg->rtmsg_flags;
1732 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1733 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1734 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1737 int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1739 struct fib6_config cfg;
1740 struct in6_rtmsg rtmsg;
1744 case SIOCADDRT: /* Add a route */
1745 case SIOCDELRT: /* Delete a route */
1746 if (!capable(CAP_NET_ADMIN))
1748 err = copy_from_user(&rtmsg, arg,
1749 sizeof(struct in6_rtmsg));
1753 rtmsg_to_fib6_config(&rtmsg, &cfg);
1758 err = ip6_route_add(&cfg);
1761 err = ip6_route_del(&cfg);
1775 * Drop the packet on the floor
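 *
 *	ip6_pkt_drop() bumps the relevant IPSTATS_MIB counter and sends an
 *	ICMPv6 Destination Unreachable with the given code: the discard entries
 *	use ICMPV6_NOROUTE and the prohibit entries ICMPV6_ADM_PROHIBITED.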
1778 static inline int ip6_pkt_drop(struct sk_buff *skb, int code)
1780 int type = ipv6_addr_type(&skb->nh.ipv6h->daddr);
1781 if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED)
1782 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INADDRERRORS);
1784 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTNOROUTES);
1785 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
1790 static int ip6_pkt_discard(struct sk_buff *skb)
1792 return ip6_pkt_drop(skb, ICMPV6_NOROUTE);
1795 static int ip6_pkt_discard_out(struct sk_buff *skb)
1797 skb->dev = skb->dst->dev;
1798 return ip6_pkt_discard(skb);
1801 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1803 static int ip6_pkt_prohibit(struct sk_buff *skb)
1805 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED);
1808 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1810 skb->dev = skb->dst->dev;
1811 return ip6_pkt_prohibit(skb);
1814 static int ip6_pkt_blk_hole(struct sk_buff *skb)
1823 * Allocate a dst for local (unicast / anycast) address.
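 *
 *	The result is a /128 host route on loopback_dev, flagged RTF_LOCAL (or
 *	RTF_ANYCAST for anycast addresses), with a held neighbour entry and
 *	rt6i_table pointing at the local table (RT6_TABLE_LOCAL).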
1826 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1827 const struct in6_addr *addr,
1830 struct rt6_info *rt = ip6_dst_alloc();
1833 return ERR_PTR(-ENOMEM);
1835 dev_hold(&loopback_dev);
1838 rt->u.dst.flags = DST_HOST;
1839 rt->u.dst.input = ip6_input;
1840 rt->u.dst.output = ip6_output;
1841 rt->rt6i_dev = &loopback_dev;
1842 rt->rt6i_idev = idev;
1843 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1844 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1845 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1846 rt->u.dst.obsolete = -1;
1848 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1850 rt->rt6i_flags |= RTF_ANYCAST;
1852 rt->rt6i_flags |= RTF_LOCAL;
1853 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1854 if (rt->rt6i_nexthop == NULL) {
1855 dst_free(&rt->u.dst);
1856 return ERR_PTR(-ENOMEM);
1859 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1860 rt->rt6i_dst.plen = 128;
1861 rt->rt6i_table = fib6_get_table(RT6_TABLE_LOCAL);
1863 atomic_set(&rt->u.dst.__refcnt, 1);
1868 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1870 if (((void*)rt->rt6i_dev == arg || arg == NULL) &&
1871 rt != &ip6_null_entry) {
1872 RT6_TRACE("deleted by ifdown %p\n", rt);
1878 void rt6_ifdown(struct net_device *dev)
1880 fib6_clean_all(fib6_ifdown, 0, dev);
1883 struct rt6_mtu_change_arg
1885 struct net_device *dev;
1889 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1891 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1892 struct inet6_dev *idev;
1894 /* In IPv6, pmtu discovery is not optional,
1895 so the RTAX_MTU lock cannot disable it.
1896 We still use this lock to block changes
1897 caused by addrconf/ndisc.
1900 idev = __in6_dev_get(arg->dev);
1904 /* For an administrative MTU increase, there is no way to discover
1905 an IPv6 PMTU increase, so the PMTU must be updated here.
1906 Since RFC 1981 doesn't cover administrative MTU increases,
1907 updating the PMTU on such an increase is a MUST (e.g. jumbo frames).
1910 If the new MTU is less than the route PMTU, the new MTU will be the
1911 lowest MTU in the path; update the route PMTU to reflect the
1912 decrease. If the new MTU is greater than the route PMTU, and the
1913 old MTU was the lowest MTU in the path, update the route PMTU
1914 to reflect the increase. In that case, if other nodes' links also
1915 have the lowest MTU, a Packet Too Big message leads to proper PMTU propagation.
1918 if (rt->rt6i_dev == arg->dev &&
1919 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1920 (dst_mtu(&rt->u.dst) > arg->mtu ||
1921 (dst_mtu(&rt->u.dst) < arg->mtu &&
1922 dst_mtu(&rt->u.dst) == idev->cnf.mtu6)))
1923 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1924 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
1928 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1930 struct rt6_mtu_change_arg arg = {
1935 fib6_clean_all(rt6_mtu_change_route, 0, &arg);
1938 static struct nla_policy rtm_ipv6_policy[RTA_MAX+1] __read_mostly = {
1939 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
1940 [RTA_OIF] = { .type = NLA_U32 },
1941 [RTA_IIF] = { .type = NLA_U32 },
1942 [RTA_PRIORITY] = { .type = NLA_U32 },
1943 [RTA_METRICS] = { .type = NLA_NESTED },
1946 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1947 struct fib6_config *cfg)
1950 struct nlattr *tb[RTA_MAX+1];
1953 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
1958 rtm = nlmsg_data(nlh);
1959 memset(cfg, 0, sizeof(*cfg));
1961 cfg->fc_table = rtm->rtm_table;
1962 cfg->fc_dst_len = rtm->rtm_dst_len;
1963 cfg->fc_src_len = rtm->rtm_src_len;
1964 cfg->fc_flags = RTF_UP;
1965 cfg->fc_protocol = rtm->rtm_protocol;
1967 if (rtm->rtm_type == RTN_UNREACHABLE)
1968 cfg->fc_flags |= RTF_REJECT;
1970 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
1971 cfg->fc_nlinfo.nlh = nlh;
1973 if (tb[RTA_GATEWAY]) {
1974 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
1975 cfg->fc_flags |= RTF_GATEWAY;
1979 int plen = (rtm->rtm_dst_len + 7) >> 3;
1981 if (nla_len(tb[RTA_DST]) < plen)
1984 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
1988 int plen = (rtm->rtm_src_len + 7) >> 3;
1990 if (nla_len(tb[RTA_SRC]) < plen)
1993 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
1997 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
1999 if (tb[RTA_PRIORITY])
2000 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2002 if (tb[RTA_METRICS]) {
2003 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2004 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2008 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2015 int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2017 struct fib6_config cfg;
2020 err = rtm_to_fib6_config(skb, nlh, &cfg);
2024 return ip6_route_del(&cfg);
2027 int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2029 struct fib6_config cfg;
2032 err = rtm_to_fib6_config(skb, nlh, &cfg);
2036 return ip6_route_add(&cfg);
2039 static inline size_t rt6_nlmsg_size(void)
2041 return NLMSG_ALIGN(sizeof(struct rtmsg))
2042 + nla_total_size(16) /* RTA_SRC */
2043 + nla_total_size(16) /* RTA_DST */
2044 + nla_total_size(16) /* RTA_GATEWAY */
2045 + nla_total_size(16) /* RTA_PREFSRC */
2046 + nla_total_size(4) /* RTA_TABLE */
2047 + nla_total_size(4) /* RTA_IIF */
2048 + nla_total_size(4) /* RTA_OIF */
2049 + nla_total_size(4) /* RTA_PRIORITY */
2050 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2051 + nla_total_size(sizeof(struct rta_cacheinfo));
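/*
 *	rt6_fill_node() serializes one rt6_info into a netlink route message:
 *	the route type is derived from RTF_REJECT and loopback devices, the
 *	protocol from RTF_DYNAMIC/RTF_ADDRCONF/RTF_DEFAULT, and RTF_CACHE
 *	clones are marked RTM_F_CLONED.  rt6_nlmsg_size() above must account
 *	for every attribute emitted here; inet6_rt_notify() warns on -EMSGSIZE
 *	if it does not.
 */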
2054 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
2055 struct in6_addr *dst, struct in6_addr *src,
2056 int iif, int type, u32 pid, u32 seq,
2057 int prefix, unsigned int flags)
2060 struct nlmsghdr *nlh;
2064 if (prefix) { /* user wants prefix routes only */
2065 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2066 /* success since this is not a prefix route */
2071 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2075 rtm = nlmsg_data(nlh);
2076 rtm->rtm_family = AF_INET6;
2077 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2078 rtm->rtm_src_len = rt->rt6i_src.plen;
2081 table = rt->rt6i_table->tb6_id;
2083 table = RT6_TABLE_UNSPEC;
2084 rtm->rtm_table = table;
2085 NLA_PUT_U32(skb, RTA_TABLE, table);
2086 if (rt->rt6i_flags&RTF_REJECT)
2087 rtm->rtm_type = RTN_UNREACHABLE;
2088 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2089 rtm->rtm_type = RTN_LOCAL;
2091 rtm->rtm_type = RTN_UNICAST;
2093 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2094 rtm->rtm_protocol = rt->rt6i_protocol;
2095 if (rt->rt6i_flags&RTF_DYNAMIC)
2096 rtm->rtm_protocol = RTPROT_REDIRECT;
2097 else if (rt->rt6i_flags & RTF_ADDRCONF)
2098 rtm->rtm_protocol = RTPROT_KERNEL;
2099 else if (rt->rt6i_flags&RTF_DEFAULT)
2100 rtm->rtm_protocol = RTPROT_RA;
2102 if (rt->rt6i_flags&RTF_CACHE)
2103 rtm->rtm_flags |= RTM_F_CLONED;
2106 NLA_PUT(skb, RTA_DST, 16, dst);
2107 rtm->rtm_dst_len = 128;
2108 } else if (rtm->rtm_dst_len)
2109 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2110 #ifdef CONFIG_IPV6_SUBTREES
2112 NLA_PUT(skb, RTA_SRC, 16, src);
2113 rtm->rtm_src_len = 128;
2114 } else if (rtm->rtm_src_len)
2115 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2118 NLA_PUT_U32(skb, RTA_IIF, iif);
2120 struct in6_addr saddr_buf;
2121 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
2122 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2125 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2126 goto nla_put_failure;
2128 if (rt->u.dst.neighbour)
2129 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
2132 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2134 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2136 expires = rt->rt6i_expires ? rt->rt6i_expires - jiffies : 0;
2137 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2138 expires, rt->u.dst.error) < 0)
2139 goto nla_put_failure;
2141 return nlmsg_end(skb, nlh);
2144 nlmsg_cancel(skb, nlh);
2148 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2150 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2153 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2154 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2155 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2159 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2160 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2161 prefix, NLM_F_MULTI);
2164 int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2166 struct nlattr *tb[RTA_MAX+1];
2167 struct rt6_info *rt;
2168 struct sk_buff *skb;
2173 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2178 memset(&fl, 0, sizeof(fl));
2181 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2184 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2188 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2191 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2195 iif = nla_get_u32(tb[RTA_IIF]);
2198 fl.oif = nla_get_u32(tb[RTA_OIF]);
2201 struct net_device *dev;
2202 dev = __dev_get_by_index(iif);
2209 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2215 /* Reserve room for dummy headers; this skb can pass
2216 through a good chunk of the routing engine.
2218 skb->mac.raw = skb->data;
2219 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2221 rt = (struct rt6_info*) ip6_route_output(NULL, &fl);
2222 skb->dst = &rt->u.dst;
2224 err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2225 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2226 nlh->nlmsg_seq, 0, 0);
2232 err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
2237 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2239 struct sk_buff *skb;
2240 u32 pid = 0, seq = 0;
2241 struct nlmsghdr *nlh = NULL;
2248 seq = nlh->nlmsg_seq;
2251 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2255 err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
2257 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2258 WARN_ON(err == -EMSGSIZE);
2262 err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
2265 rtnl_set_sk_err(RTNLGRP_IPV6_ROUTE, err);
2272 #ifdef CONFIG_PROC_FS
2274 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
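/*
 *	Each /proc/net/ipv6_route line is RT6_INFO_LEN bytes: destination and
 *	source prefixes as 32 hex digits plus prefix length, the gateway (or
 *	all zeros when there is no nexthop), then metric, refcount, use count,
 *	flags and the device name.
 */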
2285 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2287 struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
2289 if (arg->skip < arg->offset / RT6_INFO_LEN) {
2294 if (arg->len >= arg->length)
2297 arg->len += sprintf(arg->buffer + arg->len,
2298 NIP6_SEQFMT " %02x ",
2299 NIP6(rt->rt6i_dst.addr),
2302 #ifdef CONFIG_IPV6_SUBTREES
2303 arg->len += sprintf(arg->buffer + arg->len,
2304 NIP6_SEQFMT " %02x ",
2305 NIP6(rt->rt6i_src.addr),
2308 arg->len += sprintf(arg->buffer + arg->len,
2309 "00000000000000000000000000000000 00 ");
2312 if (rt->rt6i_nexthop) {
2313 arg->len += sprintf(arg->buffer + arg->len,
2315 NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
2317 arg->len += sprintf(arg->buffer + arg->len,
2318 "00000000000000000000000000000000");
2320 arg->len += sprintf(arg->buffer + arg->len,
2321 " %08x %08x %08x %08x %8s\n",
2322 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2323 rt->u.dst.__use, rt->rt6i_flags,
2324 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2328 static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
2330 struct rt6_proc_arg arg = {
2336 fib6_clean_all(rt6_info_route, 0, &arg);
2340 *start += offset % RT6_INFO_LEN;
2342 arg.len -= offset % RT6_INFO_LEN;
2344 if (arg.len > length)
2352 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2354 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2355 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
2356 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
2357 rt6_stats.fib_rt_cache,
2358 atomic_read(&ip6_dst_ops.entries),
2359 rt6_stats.fib_discarded_routes);
2364 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2366 return single_open(file, rt6_stats_seq_show, NULL);
2369 static const struct file_operations rt6_stats_seq_fops = {
2370 .owner = THIS_MODULE,
2371 .open = rt6_stats_seq_open,
2373 .llseek = seq_lseek,
2374 .release = single_release,
2376 #endif /* CONFIG_PROC_FS */
2378 #ifdef CONFIG_SYSCTL
2380 static int flush_delay;
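/*
 *	Writing to /proc/sys/net/ipv6/route/flush triggers an immediate fib6
 *	garbage-collection pass; a positive value is passed to fib6_run_gc()
 *	as the expiry timeout.
 */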
2383 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2384 void __user *buffer, size_t *lenp, loff_t *ppos)
2387 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2388 fib6_run_gc(flush_delay <= 0 ? ~0UL : (unsigned long)flush_delay);
2394 ctl_table ipv6_route_table[] = {
2396 .ctl_name = NET_IPV6_ROUTE_FLUSH,
2397 .procname = "flush",
2398 .data = &flush_delay,
2399 .maxlen = sizeof(int),
2401 .proc_handler = &ipv6_sysctl_rtcache_flush
2404 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2405 .procname = "gc_thresh",
2406 .data = &ip6_dst_ops.gc_thresh,
2407 .maxlen = sizeof(int),
2409 .proc_handler = &proc_dointvec,
2412 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2413 .procname = "max_size",
2414 .data = &ip6_rt_max_size,
2415 .maxlen = sizeof(int),
2417 .proc_handler = &proc_dointvec,
2420 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2421 .procname = "gc_min_interval",
2422 .data = &ip6_rt_gc_min_interval,
2423 .maxlen = sizeof(int),
2425 .proc_handler = &proc_dointvec_jiffies,
2426 .strategy = &sysctl_jiffies,
2429 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2430 .procname = "gc_timeout",
2431 .data = &ip6_rt_gc_timeout,
2432 .maxlen = sizeof(int),
2434 .proc_handler = &proc_dointvec_jiffies,
2435 .strategy = &sysctl_jiffies,
2438 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2439 .procname = "gc_interval",
2440 .data = &ip6_rt_gc_interval,
2441 .maxlen = sizeof(int),
2443 .proc_handler = &proc_dointvec_jiffies,
2444 .strategy = &sysctl_jiffies,
2447 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2448 .procname = "gc_elasticity",
2449 .data = &ip6_rt_gc_elasticity,
2450 .maxlen = sizeof(int),
2452 .proc_handler = &proc_dointvec_jiffies,
2453 .strategy = &sysctl_jiffies,
2456 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2457 .procname = "mtu_expires",
2458 .data = &ip6_rt_mtu_expires,
2459 .maxlen = sizeof(int),
2461 .proc_handler = &proc_dointvec_jiffies,
2462 .strategy = &sysctl_jiffies,
2465 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2466 .procname = "min_adv_mss",
2467 .data = &ip6_rt_min_advmss,
2468 .maxlen = sizeof(int),
2470 .proc_handler = &proc_dointvec_jiffies,
2471 .strategy = &sysctl_jiffies,
2474 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2475 .procname = "gc_min_interval_ms",
2476 .data = &ip6_rt_gc_min_interval,
2477 .maxlen = sizeof(int),
2479 .proc_handler = &proc_dointvec_ms_jiffies,
2480 .strategy = &sysctl_ms_jiffies,
2487 void __init ip6_route_init(void)
2489 struct proc_dir_entry *p;
2491 ip6_dst_ops.kmem_cachep =
2492 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2493 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
2495 #ifdef CONFIG_PROC_FS
2496 p = proc_net_create("ipv6_route", 0, rt6_proc_info);
2498 p->owner = THIS_MODULE;
2500 proc_net_fops_create("rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2505 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2510 void ip6_route_cleanup(void)
2512 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2513 fib6_rules_cleanup();
2515 #ifdef CONFIG_PROC_FS
2516 proc_net_remove("ipv6_route");
2517 proc_net_remove("rt6_stats");
2524 kmem_cache_destroy(ip6_dst_ops.kmem_cachep);