/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*	Changes:
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  Otherwise, round-robin the list.
 */
27 #include <linux/capability.h>
28 #include <linux/config.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/times.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/route.h>
36 #include <linux/netdevice.h>
37 #include <linux/in6.h>
38 #include <linux/init.h>
39 #include <linux/netlink.h>
40 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
49 #include <net/ip6_fib.h>
50 #include <net/ip6_route.h>
51 #include <net/ndisc.h>
52 #include <net/addrconf.h>
54 #include <linux/rtnetlink.h>
58 #include <asm/uaccess.h>
61 #include <linux/sysctl.h>
#define RT6_DEBUG 2	/* Set to 3 to get tracing. */
#if RT6_DEBUG >= 3
#define RDBG(x) printk x
#define RT6_TRACE(x...) printk(KERN_DEBUG x)
#else
#define RDBG(x)
#define RT6_TRACE(x...) do { ; } while (0)
#endif
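/*
 * When CLONE_OFFLINK_ROUTE is nonzero, off-link routes looked up in
 * ip6_route_input()/ip6_route_output() are cloned into per-destination
 * cache entries via rt6_alloc_clone(); with it left at 0, the looked-up
 * route is used as-is and no host clone is cached.
 */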
75 #define CLONE_OFFLINK_ROUTE 0
77 #define RT6_SELECT_F_IFACE 0x1
78 #define RT6_SELECT_F_REACHABLE 0x2
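/*
 * Bits for the 'strict' argument of rt6_select()/rt6_score_route():
 * RT6_SELECT_F_IFACE     - the route must match the requested output
 *                          interface;
 * RT6_SELECT_F_REACHABLE - only consider routers whose neighbour entry
 *                          is (probably) reachable.
 */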
80 static int ip6_rt_max_size = 4096;
81 static int ip6_rt_gc_min_interval = HZ / 2;
82 static int ip6_rt_gc_timeout = 60*HZ;
83 int ip6_rt_gc_interval = 30*HZ;
84 static int ip6_rt_gc_elasticity = 9;
85 static int ip6_rt_mtu_expires = 10*60*HZ;
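/* Smallest advertised MSS: IPV6_MIN_MTU (1280) minus 20 bytes of TCP
 * header and 40 bytes of IPv6 header, i.e. 1220 bytes. */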
86 static int ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
88 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
89 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
90 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
91 static void ip6_dst_destroy(struct dst_entry *);
92 static void ip6_dst_ifdown(struct dst_entry *,
93 struct net_device *dev, int how);
94 static int ip6_dst_gc(void);
96 static int ip6_pkt_discard(struct sk_buff *skb);
97 static int ip6_pkt_discard_out(struct sk_buff *skb);
98 static void ip6_link_failure(struct sk_buff *skb);
99 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
101 #ifdef CONFIG_IPV6_ROUTE_INFO
102 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
103 struct in6_addr *gwaddr, int ifindex,
105 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
106 struct in6_addr *gwaddr, int ifindex);
109 static struct dst_ops ip6_dst_ops = {
111 .protocol = __constant_htons(ETH_P_IPV6),
114 .check = ip6_dst_check,
115 .destroy = ip6_dst_destroy,
116 .ifdown = ip6_dst_ifdown,
117 .negative_advice = ip6_negative_advice,
118 .link_failure = ip6_link_failure,
119 .update_pmtu = ip6_rt_update_pmtu,
120 .entry_size = sizeof(struct rt6_info),
123 struct rt6_info ip6_null_entry = {
126 .__refcnt = ATOMIC_INIT(1),
128 .dev = &loopback_dev,
130 .error = -ENETUNREACH,
131 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
132 .input = ip6_pkt_discard,
133 .output = ip6_pkt_discard_out,
135 .path = (struct dst_entry*)&ip6_null_entry,
138 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
139 .rt6i_metric = ~(u32) 0,
140 .rt6i_ref = ATOMIC_INIT(1),
143 struct fib6_node ip6_routing_table = {
144 .leaf = &ip6_null_entry,
145 .fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
148 /* Protects all the ip6 fib */
150 DEFINE_RWLOCK(rt6_lock);
153 /* allocate dst with ip6_dst_ops */
154 static __inline__ struct rt6_info *ip6_dst_alloc(void)
156 return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
159 static void ip6_dst_destroy(struct dst_entry *dst)
161 struct rt6_info *rt = (struct rt6_info *)dst;
162 struct inet6_dev *idev = rt->rt6i_idev;
165 rt->rt6i_idev = NULL;
170 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
173 struct rt6_info *rt = (struct rt6_info *)dst;
174 struct inet6_dev *idev = rt->rt6i_idev;
176 if (dev != &loopback_dev && idev != NULL && idev->dev == dev) {
177 struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
178 if (loopback_idev != NULL) {
179 rt->rt6i_idev = loopback_idev;
185 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
187 return (rt->rt6i_flags & RTF_EXPIRES &&
188 time_after(jiffies, rt->rt6i_expires));
/*
 *	Route lookup.  rt6_lock is assumed to be held by the caller
 *	(read or write).
 */
195 static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
199 struct rt6_info *local = NULL;
200 struct rt6_info *sprt;
203 for (sprt = rt; sprt; sprt = sprt->u.next) {
204 struct net_device *dev = sprt->rt6i_dev;
205 if (dev->ifindex == oif)
207 if (dev->flags & IFF_LOOPBACK) {
208 if (sprt->rt6i_idev == NULL ||
209 sprt->rt6i_idev->dev->ifindex != oif) {
212 if (local && (!oif ||
213 local->rt6i_idev->dev->ifindex == oif))
224 return &ip6_null_entry;
229 #ifdef CONFIG_IPV6_ROUTER_PREF
230 static void rt6_probe(struct rt6_info *rt)
232 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now; however, we need to check if it
	 * is really so, i.e. Router Reachability Probing.
	 *
	 * A Router Reachability Probe MUST be rate-limited
	 * to no more than one probe per minute.
	 */
241 if (!neigh || (neigh->nud_state & NUD_VALID))
243 read_lock_bh(&neigh->lock);
244 if (!(neigh->nud_state & NUD_VALID) &&
245 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
246 struct in6_addr mcaddr;
247 struct in6_addr *target;
249 neigh->updated = jiffies;
250 read_unlock_bh(&neigh->lock);
252 target = (struct in6_addr *)&neigh->primary_key;
253 addrconf_addr_solict_mult(target, &mcaddr);
254 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
256 read_unlock_bh(&neigh->lock);
259 static inline void rt6_probe(struct rt6_info *rt)
/*
 *	Default Router Selection (RFC 2461 6.3.6)
 */
268 static int inline rt6_check_dev(struct rt6_info *rt, int oif)
270 struct net_device *dev = rt->rt6i_dev;
271 if (!oif || dev->ifindex == oif)
273 if ((dev->flags & IFF_LOOPBACK) &&
274 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
279 static int inline rt6_check_neigh(struct rt6_info *rt)
281 struct neighbour *neigh = rt->rt6i_nexthop;
284 read_lock_bh(&neigh->lock);
285 if (neigh->nud_state & NUD_VALID)
287 read_unlock_bh(&neigh->lock);
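/*
 * Score a candidate default router: the device match from
 * rt6_check_dev() forms the low bits, the RA router preference is ORed
 * in above them when CONFIG_IPV6_ROUTER_PREF is set, and a (probably)
 * reachable nexthop raises the score further.  A negative return value
 * means the route must not be used under the given 'strict' bits.
 */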
292 static int rt6_score_route(struct rt6_info *rt, int oif,
295 int m = rt6_check_dev(rt, oif);
296 if (!m && (strict & RT6_SELECT_F_IFACE))
298 #ifdef CONFIG_IPV6_ROUTER_PREF
299 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
301 if (rt6_check_neigh(rt))
303 else if (strict & RT6_SELECT_F_REACHABLE)
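/*
 * Pick the best route of equal metric starting at *head, honouring the
 * RT6_SELECT_F_* bits in 'strict'.  When nothing (probably) reachable
 * is found and RT6_SELECT_F_REACHABLE is set, the list head is rotated
 * so that later lookups round-robin over the candidate routers.
 */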
308 static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
311 struct rt6_info *match = NULL, *last = NULL;
312 struct rt6_info *rt, *rt0 = *head;
316 RT6_TRACE("%s(head=%p(*head=%p), oif=%d)\n",
317 __FUNCTION__, head, head ? *head : NULL, oif);
319 for (rt = rt0, metric = rt0->rt6i_metric;
320 rt && rt->rt6i_metric == metric && (!last || rt != rt0);
324 if (rt6_check_expired(rt))
329 m = rt6_score_route(rt, oif, strict);
343 (strict & RT6_SELECT_F_REACHABLE) &&
344 last && last != rt0) {
345 /* no entries matched; do round-robin */
346 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
349 rt0->u.next = last->u.next;
354 RT6_TRACE("%s() => %p, score=%d\n",
355 __FUNCTION__, match, mpri);
357 return (match ? match : &ip6_null_entry);
360 #ifdef CONFIG_IPV6_ROUTE_INFO
361 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
362 struct in6_addr *gwaddr)
364 struct route_info *rinfo = (struct route_info *) opt;
365 struct in6_addr prefix_buf, *prefix;
370 if (len < sizeof(struct route_info)) {
374 /* Sanity check for prefix_len and length */
375 if (rinfo->length > 3) {
377 } else if (rinfo->prefix_len > 128) {
379 } else if (rinfo->prefix_len > 64) {
380 if (rinfo->length < 2) {
383 } else if (rinfo->prefix_len > 0) {
384 if (rinfo->length < 1) {
389 pref = rinfo->route_pref;
390 if (pref == ICMPV6_ROUTER_PREF_INVALID)
391 pref = ICMPV6_ROUTER_PREF_MEDIUM;
	lifetime = ntohl(rinfo->lifetime);
	if (lifetime == 0xffffffff) {
	} else if (lifetime > 0x7fffffff/HZ) {
		/* Avoid arithmetic overflow: the expiry below is
		 * computed as jiffies + HZ * lifetime. */
		lifetime = 0x7fffffff/HZ - 1;
401 if (rinfo->length == 3)
402 prefix = (struct in6_addr *)rinfo->prefix;
404 /* this function is safe */
405 ipv6_addr_prefix(&prefix_buf,
406 (struct in6_addr *)rinfo->prefix,
408 prefix = &prefix_buf;
411 rt = rt6_get_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex);
413 if (rt && !lifetime) {
414 ip6_del_rt(rt, NULL, NULL, NULL);
419 rt = rt6_add_route_info(prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
422 rt->rt6i_flags = RTF_ROUTEINFO |
423 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
426 if (lifetime == 0xffffffff) {
427 rt->rt6i_flags &= ~RTF_EXPIRES;
429 rt->rt6i_expires = jiffies + HZ * lifetime;
430 rt->rt6i_flags |= RTF_EXPIRES;
432 dst_release(&rt->u.dst);
438 struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
441 struct fib6_node *fn;
444 read_lock_bh(&rt6_lock);
445 fn = fib6_lookup(&ip6_routing_table, daddr, saddr);
446 rt = rt6_device_match(fn->leaf, oif, strict);
447 dst_hold(&rt->u.dst);
449 read_unlock_bh(&rt6_lock);
451 rt->u.dst.lastuse = jiffies;
452 if (rt->u.dst.error == 0)
454 dst_release(&rt->u.dst);
/*	ip6_ins_rt is called with FREE rt6_lock.
	It takes a new route entry; if the addition fails for any
	reason, the route is freed.  In any case, if the caller does
	not hold a reference, it may be destroyed.
 */
464 int ip6_ins_rt(struct rt6_info *rt, struct nlmsghdr *nlh,
465 void *_rtattr, struct netlink_skb_parms *req)
469 write_lock_bh(&rt6_lock);
470 err = fib6_add(&ip6_routing_table, rt, nlh, _rtattr, req);
471 write_unlock_bh(&rt6_lock);
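/*
 * Copy-on-write clone of a route into a host route for 'daddr': the
 * clone gets plen 128, RTF_CACHE and its own neighbour entry, so
 * per-destination state such as PMTU can be attached to it.
 */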
476 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
477 struct in6_addr *saddr)
485 rt = ip6_rt_copy(ort);
488 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
489 if (rt->rt6i_dst.plen != 128 &&
490 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
491 rt->rt6i_flags |= RTF_ANYCAST;
492 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
495 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
496 rt->rt6i_dst.plen = 128;
497 rt->rt6i_flags |= RTF_CACHE;
498 rt->u.dst.flags |= DST_HOST;
500 #ifdef CONFIG_IPV6_SUBTREES
501 if (rt->rt6i_src.plen && saddr) {
502 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
503 rt->rt6i_src.plen = 128;
507 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
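/*
 * Like rt6_alloc_cow(), but for routes that already have a usable
 * nexthop: clone the entry into a host route and share the original
 * neighbour entry instead of resolving a new one.
 */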
514 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
516 struct rt6_info *rt = ip6_rt_copy(ort);
518 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
519 rt->rt6i_dst.plen = 128;
520 rt->rt6i_flags |= RTF_CACHE;
521 if (rt->rt6i_flags & RTF_REJECT)
522 rt->u.dst.error = ort->u.dst.error;
523 rt->u.dst.flags |= DST_HOST;
524 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
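/*
 * On a lookup miss (rt == &ip6_null_entry), walk back up the fib tree
 * from the current fib6_node and retry from the first ancestor that
 * carries route information (RTN_RTINFO); give up at the tree root.
 */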
529 #define BACKTRACK() \
530 if (rt == &ip6_null_entry) { \
531 while ((fn = fn->parent) != NULL) { \
532 if (fn->fn_flags & RTN_ROOT) { \
535 if (fn->fn_flags & RTN_RTINFO) \
541 void ip6_route_input(struct sk_buff *skb)
543 struct fib6_node *fn;
544 struct rt6_info *rt, *nrt;
548 int reachable = RT6_SELECT_F_REACHABLE;
550 strict = ipv6_addr_type(&skb->nh.ipv6h->daddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL) ? RT6_SELECT_F_IFACE : 0;
553 read_lock_bh(&rt6_lock);
556 fn = fib6_lookup(&ip6_routing_table, &skb->nh.ipv6h->daddr,
557 &skb->nh.ipv6h->saddr);
560 rt = rt6_select(&fn->leaf, skb->dev->ifindex, strict | reachable);
562 if (rt == &ip6_null_entry ||
563 rt->rt6i_flags & RTF_CACHE)
566 dst_hold(&rt->u.dst);
567 read_unlock_bh(&rt6_lock);
569 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
570 nrt = rt6_alloc_cow(rt, &skb->nh.ipv6h->daddr, &skb->nh.ipv6h->saddr);
572 #if CLONE_OFFLINK_ROUTE
573 nrt = rt6_alloc_clone(rt, &skb->nh.ipv6h->daddr);
579 dst_release(&rt->u.dst);
580 rt = nrt ? : &ip6_null_entry;
582 dst_hold(&rt->u.dst);
584 err = ip6_ins_rt(nrt, NULL, NULL, &NETLINK_CB(skb));
		/* Race condition! In the gap when rt6_lock was released,
		 * someone could have inserted this route.  Relookup. */
596 dst_release(&rt->u.dst);
604 dst_hold(&rt->u.dst);
605 read_unlock_bh(&rt6_lock);
607 rt->u.dst.lastuse = jiffies;
609 skb->dst = (struct dst_entry *) rt;
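/*
 * Route output lookup for locally generated packets: select among
 * routers of equal metric (restricting to fl->oif for link-local and
 * multicast destinations, preferring reachable nexthops) and insert a
 * per-destination cache entry when the chosen route is not already
 * RTF_CACHE.
 */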
613 struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
615 struct fib6_node *fn;
616 struct rt6_info *rt, *nrt;
620 int reachable = RT6_SELECT_F_REACHABLE;
622 strict = ipv6_addr_type(&fl->fl6_dst) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL) ? RT6_SELECT_F_IFACE : 0;
625 read_lock_bh(&rt6_lock);
628 fn = fib6_lookup(&ip6_routing_table, &fl->fl6_dst, &fl->fl6_src);
631 rt = rt6_select(&fn->leaf, fl->oif, strict | reachable);
633 if (rt == &ip6_null_entry ||
634 rt->rt6i_flags & RTF_CACHE)
637 dst_hold(&rt->u.dst);
638 read_unlock_bh(&rt6_lock);
640 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
641 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
643 #if CLONE_OFFLINK_ROUTE
644 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
650 dst_release(&rt->u.dst);
651 rt = nrt ? : &ip6_null_entry;
653 dst_hold(&rt->u.dst);
655 err = ip6_ins_rt(nrt, NULL, NULL, NULL);
		/* Race condition! In the gap when rt6_lock was released,
		 * someone could have inserted this route.  Relookup. */
667 dst_release(&rt->u.dst);
675 dst_hold(&rt->u.dst);
676 read_unlock_bh(&rt6_lock);
678 rt->u.dst.lastuse = jiffies;
/*
 *	Destination cache support functions
 */
688 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
692 rt = (struct rt6_info *) dst;
694 if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
700 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
702 struct rt6_info *rt = (struct rt6_info *) dst;
705 if (rt->rt6i_flags & RTF_CACHE)
706 ip6_del_rt(rt, NULL, NULL, NULL);
713 static void ip6_link_failure(struct sk_buff *skb)
717 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
719 rt = (struct rt6_info *) skb->dst;
721 if (rt->rt6i_flags&RTF_CACHE) {
722 dst_set_expires(&rt->u.dst, 0);
723 rt->rt6i_flags |= RTF_EXPIRES;
724 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
725 rt->rt6i_node->fn_sernum = -1;
729 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
731 struct rt6_info *rt6 = (struct rt6_info*)dst;
733 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
734 rt6->rt6i_flags |= RTF_MODIFIED;
735 if (mtu < IPV6_MIN_MTU) {
737 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
739 dst->metrics[RTAX_MTU-1] = mtu;
743 /* Protected by rt6_lock. */
744 static struct dst_entry *ndisc_dst_gc_list;
745 static int ipv6_get_mtu(struct net_device *dev);
747 static inline unsigned int ipv6_advmss(unsigned int mtu)
749 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
751 if (mtu < ip6_rt_min_advmss)
752 mtu = ip6_rt_min_advmss;
	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery".
	 */
760 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
765 struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
766 struct neighbour *neigh,
767 struct in6_addr *addr,
768 int (*output)(struct sk_buff *))
771 struct inet6_dev *idev = in6_dev_get(dev);
773 if (unlikely(idev == NULL))
776 rt = ip6_dst_alloc();
777 if (unlikely(rt == NULL)) {
786 neigh = ndisc_get_neigh(dev, addr);
789 rt->rt6i_idev = idev;
790 rt->rt6i_nexthop = neigh;
791 atomic_set(&rt->u.dst.__refcnt, 1);
792 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
793 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
794 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
795 rt->u.dst.output = output;
797 #if 0 /* there's no chance to use these for ndisc */
798 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
801 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
802 rt->rt6i_dst.plen = 128;
805 write_lock_bh(&rt6_lock);
806 rt->u.dst.next = ndisc_dst_gc_list;
807 ndisc_dst_gc_list = &rt->u.dst;
808 write_unlock_bh(&rt6_lock);
810 fib6_force_start_gc();
813 return (struct dst_entry *)rt;
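/*
 * Reap the ndisc_dst_gc_list, freeing entries whose refcount has
 * dropped to zero; 'more' reports back how many entries remain in use.
 */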
816 int ndisc_dst_gc(int *more)
818 struct dst_entry *dst, *next, **pprev;
822 pprev = &ndisc_dst_gc_list;
824 while ((dst = *pprev) != NULL) {
825 if (!atomic_read(&dst->__refcnt)) {
838 static int ip6_dst_gc(void)
840 static unsigned expire = 30*HZ;
841 static unsigned long last_gc;
842 unsigned long now = jiffies;
844 if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
845 atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
851 if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh)
852 expire = ip6_rt_gc_timeout>>1;
855 expire -= expire>>ip6_rt_gc_elasticity;
856 return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
/* Clean the host part of a prefix.  Not necessary in a radix tree,
   but it results in cleaner routing tables.

   Remove this only when everything else is known to work!
 */
865 static int ipv6_get_mtu(struct net_device *dev)
867 int mtu = IPV6_MIN_MTU;
868 struct inet6_dev *idev;
870 idev = in6_dev_get(dev);
872 mtu = idev->cnf.mtu6;
878 int ipv6_get_hoplimit(struct net_device *dev)
880 int hoplimit = ipv6_devconf.hop_limit;
881 struct inet6_dev *idev;
883 idev = in6_dev_get(dev);
885 hoplimit = idev->cnf.hop_limit;
895 int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh,
896 void *_rtattr, struct netlink_skb_parms *req)
901 struct rt6_info *rt = NULL;
902 struct net_device *dev = NULL;
903 struct inet6_dev *idev = NULL;
906 rta = (struct rtattr **) _rtattr;
908 if (rtmsg->rtmsg_dst_len > 128 || rtmsg->rtmsg_src_len > 128)
910 #ifndef CONFIG_IPV6_SUBTREES
911 if (rtmsg->rtmsg_src_len)
914 if (rtmsg->rtmsg_ifindex) {
916 dev = dev_get_by_index(rtmsg->rtmsg_ifindex);
919 idev = in6_dev_get(dev);
924 if (rtmsg->rtmsg_metric == 0)
925 rtmsg->rtmsg_metric = IP6_RT_PRIO_USER;
927 rt = ip6_dst_alloc();
934 rt->u.dst.obsolete = -1;
935 rt->rt6i_expires = jiffies + clock_t_to_jiffies(rtmsg->rtmsg_info);
936 if (nlh && (r = NLMSG_DATA(nlh))) {
937 rt->rt6i_protocol = r->rtm_protocol;
939 rt->rt6i_protocol = RTPROT_BOOT;
942 addr_type = ipv6_addr_type(&rtmsg->rtmsg_dst);
944 if (addr_type & IPV6_ADDR_MULTICAST)
945 rt->u.dst.input = ip6_mc_input;
947 rt->u.dst.input = ip6_forward;
949 rt->u.dst.output = ip6_output;
951 ipv6_addr_prefix(&rt->rt6i_dst.addr,
952 &rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len);
953 rt->rt6i_dst.plen = rtmsg->rtmsg_dst_len;
954 if (rt->rt6i_dst.plen == 128)
955 rt->u.dst.flags = DST_HOST;
957 #ifdef CONFIG_IPV6_SUBTREES
958 ipv6_addr_prefix(&rt->rt6i_src.addr,
959 &rtmsg->rtmsg_src, rtmsg->rtmsg_src_len);
960 rt->rt6i_src.plen = rtmsg->rtmsg_src_len;
963 rt->rt6i_metric = rtmsg->rtmsg_metric;
	/* We cannot add true routes via loopback here;
	   they would result in kernel looping.  Promote them to reject routes.
	 */
968 if ((rtmsg->rtmsg_flags&RTF_REJECT) ||
969 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
970 /* hold loopback dev/idev if we haven't done so. */
971 if (dev != &loopback_dev) {
978 idev = in6_dev_get(dev);
984 rt->u.dst.output = ip6_pkt_discard_out;
985 rt->u.dst.input = ip6_pkt_discard;
986 rt->u.dst.error = -ENETUNREACH;
987 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
991 if (rtmsg->rtmsg_flags & RTF_GATEWAY) {
992 struct in6_addr *gw_addr;
995 gw_addr = &rtmsg->rtmsg_gateway;
996 ipv6_addr_copy(&rt->rt6i_gateway, &rtmsg->rtmsg_gateway);
997 gwa_type = ipv6_addr_type(gw_addr);
999 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1000 struct rt6_info *grt;
			/* IPv6 strictly forbids using non-link-local
			   addresses as a nexthop address: otherwise the
			   router will not be able to send redirects.
			   That is a good thing, but in some (rare!)
			   circumstances (SIT, PtP, NBMA NOARP links) it
			   is handy to allow some exceptions. --ANK
			 */
1010 if (!(gwa_type&IPV6_ADDR_UNICAST))
1013 grt = rt6_lookup(gw_addr, NULL, rtmsg->rtmsg_ifindex, 1);
1015 err = -EHOSTUNREACH;
1019 if (dev != grt->rt6i_dev) {
1020 dst_release(&grt->u.dst);
1024 dev = grt->rt6i_dev;
1025 idev = grt->rt6i_idev;
1027 in6_dev_hold(grt->rt6i_idev);
1029 if (!(grt->rt6i_flags&RTF_GATEWAY))
1031 dst_release(&grt->u.dst);
1037 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1045 if (rtmsg->rtmsg_flags & (RTF_GATEWAY|RTF_NONEXTHOP)) {
1046 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1047 if (IS_ERR(rt->rt6i_nexthop)) {
1048 err = PTR_ERR(rt->rt6i_nexthop);
1049 rt->rt6i_nexthop = NULL;
1054 rt->rt6i_flags = rtmsg->rtmsg_flags;
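	/* Copy user-supplied RTA_METRICS attributes into the dst metrics,
	 * then fill in defaults for the hop limit, MTU and advertised MSS. */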
1057 if (rta && rta[RTA_METRICS-1]) {
1058 int attrlen = RTA_PAYLOAD(rta[RTA_METRICS-1]);
1059 struct rtattr *attr = RTA_DATA(rta[RTA_METRICS-1]);
1061 while (RTA_OK(attr, attrlen)) {
1062 unsigned flavor = attr->rta_type;
1064 if (flavor > RTAX_MAX) {
1068 rt->u.dst.metrics[flavor-1] =
1069 *(u32 *)RTA_DATA(attr);
1071 attr = RTA_NEXT(attr, attrlen);
1075 if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
1076 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1077 if (!rt->u.dst.metrics[RTAX_MTU-1])
1078 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1079 if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
1080 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1081 rt->u.dst.dev = dev;
1082 rt->rt6i_idev = idev;
1083 return ip6_ins_rt(rt, nlh, _rtattr, req);
1091 dst_free((struct dst_entry *) rt);
1095 int ip6_del_rt(struct rt6_info *rt, struct nlmsghdr *nlh, void *_rtattr, struct netlink_skb_parms *req)
1099 write_lock_bh(&rt6_lock);
1101 err = fib6_del(rt, nlh, _rtattr, req);
1102 dst_release(&rt->u.dst);
1104 write_unlock_bh(&rt6_lock);
1109 static int ip6_route_del(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh, void *_rtattr, struct netlink_skb_parms *req)
1111 struct fib6_node *fn;
1112 struct rt6_info *rt;
1115 read_lock_bh(&rt6_lock);
1117 fn = fib6_locate(&ip6_routing_table,
1118 &rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len,
1119 &rtmsg->rtmsg_src, rtmsg->rtmsg_src_len);
1122 for (rt = fn->leaf; rt; rt = rt->u.next) {
1123 if (rtmsg->rtmsg_ifindex &&
1124 (rt->rt6i_dev == NULL ||
1125 rt->rt6i_dev->ifindex != rtmsg->rtmsg_ifindex))
1127 if (rtmsg->rtmsg_flags&RTF_GATEWAY &&
1128 !ipv6_addr_equal(&rtmsg->rtmsg_gateway, &rt->rt6i_gateway))
1130 if (rtmsg->rtmsg_metric &&
1131 rtmsg->rtmsg_metric != rt->rt6i_metric)
1133 dst_hold(&rt->u.dst);
1134 read_unlock_bh(&rt6_lock);
1136 return ip6_del_rt(rt, nlh, _rtattr, req);
1139 read_unlock_bh(&rt6_lock);
1147 void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
1148 struct neighbour *neigh, u8 *lladdr, int on_link)
1150 struct rt6_info *rt, *nrt = NULL;
1152 struct fib6_node *fn;
	/*
	 * Get the "current" route for this destination and
	 * check if the redirect has come from an appropriate router.
	 *
	 * RFC 2461 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routers.
	 */
1164 strict = ipv6_addr_type(dest) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL);
1166 read_lock_bh(&rt6_lock);
1167 fn = fib6_lookup(&ip6_routing_table, dest, NULL);
1169 for (rt = fn->leaf; rt; rt = rt->u.next) {
		/*
		 * Current route is on-link; redirect is always invalid.
		 *
		 * It seems the previous statement is not quite true: it
		 * could be a node which regards us as on-link (e.g. proxy
		 * ndisc).  But then the router serving it might decide
		 * that we should know the truth 8)8) --ANK (980726).
		 */
1178 if (rt6_check_expired(rt))
1180 if (!(rt->rt6i_flags & RTF_GATEWAY))
1182 if (neigh->dev != rt->rt6i_dev)
1184 if (!ipv6_addr_equal(saddr, &rt->rt6i_gateway))
1189 dst_hold(&rt->u.dst);
1191 while ((fn = fn->parent) != NULL) {
1192 if (fn->fn_flags & RTN_ROOT)
1194 if (fn->fn_flags & RTN_RTINFO)
1198 read_unlock_bh(&rt6_lock);
1201 if (net_ratelimit())
1202 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1203 "for redirect target\n");
1208 * We have finally decided to accept it.
1211 neigh_update(neigh, lladdr, NUD_STALE,
1212 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1213 NEIGH_UPDATE_F_OVERRIDE|
1214 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1215 NEIGH_UPDATE_F_ISROUTER))
	/*
	 * Redirect received -> path was valid.
	 * Redirects are sent only in response to data packets,
	 * so this nexthop apparently is reachable. --ANK
	 */
1223 dst_confirm(&rt->u.dst);
1225 /* Duplicate redirect: silently ignore. */
1226 if (neigh == rt->u.dst.neighbour)
1229 nrt = ip6_rt_copy(rt);
1233 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1235 nrt->rt6i_flags &= ~RTF_GATEWAY;
1237 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1238 nrt->rt6i_dst.plen = 128;
1239 nrt->u.dst.flags |= DST_HOST;
1241 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1242 nrt->rt6i_nexthop = neigh_clone(neigh);
1243 /* Reset pmtu, it may be better */
1244 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1245 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));
1247 if (ip6_ins_rt(nrt, NULL, NULL, NULL))
1250 if (rt->rt6i_flags&RTF_CACHE) {
1251 ip6_del_rt(rt, NULL, NULL, NULL);
1256 dst_release(&rt->u.dst);
/*
 *	Handle ICMP "packet too big" messages,
 *	i.e. Path MTU discovery.
 */
1265 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1266 struct net_device *dev, u32 pmtu)
1268 struct rt6_info *rt, *nrt;
1271 rt = rt6_lookup(daddr, saddr, dev->ifindex, 0);
1275 if (pmtu >= dst_mtu(&rt->u.dst))
1278 if (pmtu < IPV6_MIN_MTU) {
		/*
		 * According to RFC 2460, after a node receives a Packet
		 * Too Big message reporting a PMTU less than the IPv6
		 * Minimum Link MTU (1280), the PMTU is set to 1280 and a
		 * Fragment header should always be included.
		 */
1285 pmtu = IPV6_MIN_MTU;
	/* New MTU received -> path was valid.
	   Packet Too Big messages are sent only in response to data
	   packets, so this nexthop apparently is reachable. --ANK
	 */
1293 dst_confirm(&rt->u.dst);
	/* Host route.  If it is static, it would be better
	   not to override it but to add a new one, so that
	   the old PMTU is restored automatically when the
	   cache entry expires.
	 */
1300 if (rt->rt6i_flags & RTF_CACHE) {
1301 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1303 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1304 dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
1305 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
	   Two cases are possible:
	   1. It is a connected route.  Action: COW it.
	   2. It is a gatewayed or NONEXTHOP route.  Action: clone it.
1314 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1315 nrt = rt6_alloc_cow(rt, daddr, saddr);
1317 nrt = rt6_alloc_clone(rt, daddr);
1320 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1322 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
	/* According to RFC 1981, detection of a PMTU increase shouldn't
	 * happen within 5 minutes; the recommended timer is 10 minutes.
	 * Here the route expiration time is set to ip6_rt_mtu_expires,
	 * which is 10 minutes.  After that the decreased PMTU expires
	 * and PMTU increase detection happens automatically.
	 */
1330 dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
1331 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1333 ip6_ins_rt(nrt, NULL, NULL, NULL);
1336 dst_release(&rt->u.dst);
/*
 *	Misc support functions
 */
1343 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1345 struct rt6_info *rt = ip6_dst_alloc();
1348 rt->u.dst.input = ort->u.dst.input;
1349 rt->u.dst.output = ort->u.dst.output;
1351 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1352 rt->u.dst.dev = ort->u.dst.dev;
1354 dev_hold(rt->u.dst.dev);
1355 rt->rt6i_idev = ort->rt6i_idev;
1357 in6_dev_hold(rt->rt6i_idev);
1358 rt->u.dst.lastuse = jiffies;
1359 rt->rt6i_expires = 0;
1361 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1362 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1363 rt->rt6i_metric = 0;
1365 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1366 #ifdef CONFIG_IPV6_SUBTREES
1367 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1373 #ifdef CONFIG_IPV6_ROUTE_INFO
1374 static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixlen,
1375 struct in6_addr *gwaddr, int ifindex)
1377 struct fib6_node *fn;
1378 struct rt6_info *rt = NULL;
1380 write_lock_bh(&rt6_lock);
1381 fn = fib6_locate(&ip6_routing_table, prefix ,prefixlen, NULL, 0);
1385 for (rt = fn->leaf; rt; rt = rt->u.next) {
1386 if (rt->rt6i_dev->ifindex != ifindex)
1388 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1390 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1392 dst_hold(&rt->u.dst);
1396 write_unlock_bh(&rt6_lock);
1400 static struct rt6_info *rt6_add_route_info(struct in6_addr *prefix, int prefixlen,
1401 struct in6_addr *gwaddr, int ifindex,
1404 struct in6_rtmsg rtmsg;
1406 memset(&rtmsg, 0, sizeof(rtmsg));
1407 rtmsg.rtmsg_type = RTMSG_NEWROUTE;
1408 ipv6_addr_copy(&rtmsg.rtmsg_dst, prefix);
1409 rtmsg.rtmsg_dst_len = prefixlen;
1410 ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
1411 rtmsg.rtmsg_metric = 1024;
1412 rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref);
1413 /* We should treat it as a default route if prefix length is 0. */
1415 rtmsg.rtmsg_flags |= RTF_DEFAULT;
1416 rtmsg.rtmsg_ifindex = ifindex;
1418 ip6_route_add(&rtmsg, NULL, NULL, NULL);
1420 return rt6_get_route_info(prefix, prefixlen, gwaddr, ifindex);
1424 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1426 struct rt6_info *rt;
1427 struct fib6_node *fn;
1429 fn = &ip6_routing_table;
1431 write_lock_bh(&rt6_lock);
1432 for (rt = fn->leaf; rt; rt=rt->u.next) {
1433 if (dev == rt->rt6i_dev &&
1434 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1435 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1439 dst_hold(&rt->u.dst);
1440 write_unlock_bh(&rt6_lock);
1444 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1445 struct net_device *dev,
1448 struct in6_rtmsg rtmsg;
1450 memset(&rtmsg, 0, sizeof(struct in6_rtmsg));
1451 rtmsg.rtmsg_type = RTMSG_NEWROUTE;
1452 ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
1453 rtmsg.rtmsg_metric = 1024;
1454 rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES |
1457 rtmsg.rtmsg_ifindex = dev->ifindex;
1459 ip6_route_add(&rtmsg, NULL, NULL, NULL);
1460 return rt6_get_dflt_router(gwaddr, dev);
1463 void rt6_purge_dflt_routers(void)
1465 struct rt6_info *rt;
1468 read_lock_bh(&rt6_lock);
1469 for (rt = ip6_routing_table.leaf; rt; rt = rt->u.next) {
1470 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1471 dst_hold(&rt->u.dst);
1473 read_unlock_bh(&rt6_lock);
1475 ip6_del_rt(rt, NULL, NULL, NULL);
1480 read_unlock_bh(&rt6_lock);
1483 int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
1485 struct in6_rtmsg rtmsg;
1489 case SIOCADDRT: /* Add a route */
1490 case SIOCDELRT: /* Delete a route */
1491 if (!capable(CAP_NET_ADMIN))
1493 err = copy_from_user(&rtmsg, arg,
1494 sizeof(struct in6_rtmsg));
1501 err = ip6_route_add(&rtmsg, NULL, NULL, NULL);
1504 err = ip6_route_del(&rtmsg, NULL, NULL, NULL);
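/*
 * Userspace reaches the SIOCADDRT/SIOCDELRT cases above through an
 * AF_INET6 socket.  A minimal sketch (illustrative only, not part of
 * this file; error handling omitted):
 *
 *	struct in6_rtmsg rtmsg = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rtmsg.rtmsg_dst);
 *	rtmsg.rtmsg_dst_len = 32;
 *	rtmsg.rtmsg_ifindex = if_nametoindex("eth0");
 *	rtmsg.rtmsg_metric  = 1024;	/* IP6_RT_PRIO_USER */
 *	rtmsg.rtmsg_flags   = RTF_UP;
 *	ioctl(fd, SIOCADDRT, &rtmsg);
 */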
/*
 *	Drop the packet on the floor
 */
1521 static int ip6_pkt_discard(struct sk_buff *skb)
1523 IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
1524 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
1529 static int ip6_pkt_discard_out(struct sk_buff *skb)
1531 skb->dev = skb->dst->dev;
1532 return ip6_pkt_discard(skb);
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */
1539 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1540 const struct in6_addr *addr,
1543 struct rt6_info *rt = ip6_dst_alloc();
1546 return ERR_PTR(-ENOMEM);
1548 dev_hold(&loopback_dev);
1551 rt->u.dst.flags = DST_HOST;
1552 rt->u.dst.input = ip6_input;
1553 rt->u.dst.output = ip6_output;
1554 rt->rt6i_dev = &loopback_dev;
1555 rt->rt6i_idev = idev;
1556 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1557 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
1558 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1559 rt->u.dst.obsolete = -1;
1561 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1563 rt->rt6i_flags |= RTF_ANYCAST;
1565 rt->rt6i_flags |= RTF_LOCAL;
1566 rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1567 if (rt->rt6i_nexthop == NULL) {
1568 dst_free((struct dst_entry *) rt);
1569 return ERR_PTR(-ENOMEM);
1572 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1573 rt->rt6i_dst.plen = 128;
1575 atomic_set(&rt->u.dst.__refcnt, 1);
1580 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1582 if (((void*)rt->rt6i_dev == arg || arg == NULL) &&
1583 rt != &ip6_null_entry) {
1584 RT6_TRACE("deleted by ifdown %p\n", rt);
1590 void rt6_ifdown(struct net_device *dev)
1592 write_lock_bh(&rt6_lock);
1593 fib6_clean_tree(&ip6_routing_table, fib6_ifdown, 0, dev);
1594 write_unlock_bh(&rt6_lock);
1597 struct rt6_mtu_change_arg
1599 struct net_device *dev;
1603 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
1605 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
1606 struct inet6_dev *idev;
	/* In IPv6, PMTU discovery is not optional,
	   so an RTAX_MTU lock cannot disable it.
	   We still use the lock to block changes
	   caused by addrconf/ndisc.
	 */
1614 idev = __in6_dev_get(arg->dev);
	/* For an administrative MTU increase, there is no way to discover
	   an IPv6 PMTU increase, so the PMTU must be updated here.
	   Since RFC 1981 doesn't cover administrative MTU increases,
	   updating the PMTU on such an increase is a MUST (e.g. for
	   jumbo frames).

	   If the new MTU is less than the route PMTU, the new MTU will be
	   the lowest MTU in the path; update the route PMTU to reflect
	   the decrease.  If the new MTU is greater than the route PMTU,
	   and the old MTU was the lowest MTU in the path, update the
	   route PMTU to reflect the increase; in that case, if other
	   nodes still have a lower MTU, a Packet Too Big message will
	   trigger PMTU discovery again.
	 */
1632 if (rt->rt6i_dev == arg->dev &&
1633 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1634 (dst_mtu(&rt->u.dst) > arg->mtu ||
1635 (dst_mtu(&rt->u.dst) < arg->mtu &&
1636 dst_mtu(&rt->u.dst) == idev->cnf.mtu6)))
1637 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
1638 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
1642 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
1644 struct rt6_mtu_change_arg arg;
1648 read_lock_bh(&rt6_lock);
1649 fib6_clean_tree(&ip6_routing_table, rt6_mtu_change_route, 0, &arg);
1650 read_unlock_bh(&rt6_lock);
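/*
 * Translate an rtnetlink RTM_NEWROUTE/RTM_DELROUTE request into the
 * legacy struct in6_rtmsg consumed by ip6_route_add()/ip6_route_del().
 */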
1653 static int inet6_rtm_to_rtmsg(struct rtmsg *r, struct rtattr **rta,
1654 struct in6_rtmsg *rtmsg)
1656 memset(rtmsg, 0, sizeof(*rtmsg));
1658 rtmsg->rtmsg_dst_len = r->rtm_dst_len;
1659 rtmsg->rtmsg_src_len = r->rtm_src_len;
1660 rtmsg->rtmsg_flags = RTF_UP;
1661 if (r->rtm_type == RTN_UNREACHABLE)
1662 rtmsg->rtmsg_flags |= RTF_REJECT;
1664 if (rta[RTA_GATEWAY-1]) {
1665 if (rta[RTA_GATEWAY-1]->rta_len != RTA_LENGTH(16))
1667 memcpy(&rtmsg->rtmsg_gateway, RTA_DATA(rta[RTA_GATEWAY-1]), 16);
1668 rtmsg->rtmsg_flags |= RTF_GATEWAY;
1670 if (rta[RTA_DST-1]) {
1671 if (RTA_PAYLOAD(rta[RTA_DST-1]) < ((r->rtm_dst_len+7)>>3))
1673 memcpy(&rtmsg->rtmsg_dst, RTA_DATA(rta[RTA_DST-1]), ((r->rtm_dst_len+7)>>3));
1675 if (rta[RTA_SRC-1]) {
1676 if (RTA_PAYLOAD(rta[RTA_SRC-1]) < ((r->rtm_src_len+7)>>3))
1678 memcpy(&rtmsg->rtmsg_src, RTA_DATA(rta[RTA_SRC-1]), ((r->rtm_src_len+7)>>3));
1680 if (rta[RTA_OIF-1]) {
1681 if (rta[RTA_OIF-1]->rta_len != RTA_LENGTH(sizeof(int)))
1683 memcpy(&rtmsg->rtmsg_ifindex, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));
1685 if (rta[RTA_PRIORITY-1]) {
1686 if (rta[RTA_PRIORITY-1]->rta_len != RTA_LENGTH(4))
1688 memcpy(&rtmsg->rtmsg_metric, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
1693 int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
1695 struct rtmsg *r = NLMSG_DATA(nlh);
1696 struct in6_rtmsg rtmsg;
1698 if (inet6_rtm_to_rtmsg(r, arg, &rtmsg))
1700 return ip6_route_del(&rtmsg, nlh, arg, &NETLINK_CB(skb));
1703 int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
1705 struct rtmsg *r = NLMSG_DATA(nlh);
1706 struct in6_rtmsg rtmsg;
1708 if (inet6_rtm_to_rtmsg(r, arg, &rtmsg))
1710 return ip6_route_add(&rtmsg, nlh, arg, &NETLINK_CB(skb));
1713 struct rt6_rtnl_dump_arg
1715 struct sk_buff *skb;
1716 struct netlink_callback *cb;
1719 static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
1720 struct in6_addr *dst, struct in6_addr *src,
1721 int iif, int type, u32 pid, u32 seq,
1722 int prefix, unsigned int flags)
1725 struct nlmsghdr *nlh;
1726 unsigned char *b = skb->tail;
1727 struct rta_cacheinfo ci;
1729 if (prefix) { /* user wants prefix routes only */
1730 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
1731 /* success since this is not a prefix route */
1736 nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*rtm), flags);
1737 rtm = NLMSG_DATA(nlh);
1738 rtm->rtm_family = AF_INET6;
1739 rtm->rtm_dst_len = rt->rt6i_dst.plen;
1740 rtm->rtm_src_len = rt->rt6i_src.plen;
1742 rtm->rtm_table = RT_TABLE_MAIN;
1743 if (rt->rt6i_flags&RTF_REJECT)
1744 rtm->rtm_type = RTN_UNREACHABLE;
1745 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
1746 rtm->rtm_type = RTN_LOCAL;
1748 rtm->rtm_type = RTN_UNICAST;
1750 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
1751 rtm->rtm_protocol = rt->rt6i_protocol;
1752 if (rt->rt6i_flags&RTF_DYNAMIC)
1753 rtm->rtm_protocol = RTPROT_REDIRECT;
1754 else if (rt->rt6i_flags & RTF_ADDRCONF)
1755 rtm->rtm_protocol = RTPROT_KERNEL;
1756 else if (rt->rt6i_flags&RTF_DEFAULT)
1757 rtm->rtm_protocol = RTPROT_RA;
1759 if (rt->rt6i_flags&RTF_CACHE)
1760 rtm->rtm_flags |= RTM_F_CLONED;
1763 RTA_PUT(skb, RTA_DST, 16, dst);
1764 rtm->rtm_dst_len = 128;
1765 } else if (rtm->rtm_dst_len)
1766 RTA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
1767 #ifdef CONFIG_IPV6_SUBTREES
1769 RTA_PUT(skb, RTA_SRC, 16, src);
1770 rtm->rtm_src_len = 128;
1771 } else if (rtm->rtm_src_len)
1772 RTA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
1775 RTA_PUT(skb, RTA_IIF, 4, &iif);
1777 struct in6_addr saddr_buf;
1778 if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
1779 RTA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
1781 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
1782 goto rtattr_failure;
1783 if (rt->u.dst.neighbour)
1784 RTA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
1786 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->rt6i_dev->ifindex);
1787 RTA_PUT(skb, RTA_PRIORITY, 4, &rt->rt6i_metric);
1788 ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
1789 if (rt->rt6i_expires)
1790 ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
1793 ci.rta_used = rt->u.dst.__use;
1794 ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
1795 ci.rta_error = rt->u.dst.error;
1799 RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
1800 nlh->nlmsg_len = skb->tail - b;
1805 skb_trim(skb, b - skb->data);
1809 static int rt6_dump_route(struct rt6_info *rt, void *p_arg)
1811 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
1814 if (arg->cb->nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(struct rtmsg))) {
1815 struct rtmsg *rtm = NLMSG_DATA(arg->cb->nlh);
1816 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
1820 return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
1821 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
1822 prefix, NLM_F_MULTI);
1825 static int fib6_dump_node(struct fib6_walker_t *w)
1828 struct rt6_info *rt;
1830 for (rt = w->leaf; rt; rt = rt->u.next) {
1831 res = rt6_dump_route(rt, w->args);
1833 /* Frame is full, suspend walking */
1843 static void fib6_dump_end(struct netlink_callback *cb)
1845 struct fib6_walker_t *w = (void*)cb->args[0];
1849 fib6_walker_unlink(w);
1852 cb->done = (void*)cb->args[1];
1856 static int fib6_dump_done(struct netlink_callback *cb)
1859 return cb->done ? cb->done(cb) : 0;
1862 int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
1864 struct rt6_rtnl_dump_arg arg;
1865 struct fib6_walker_t *w;
1871 w = (void*)cb->args[0];
1875 * 1. hook callback destructor.
1877 cb->args[1] = (long)cb->done;
1878 cb->done = fib6_dump_done;
1881 * 2. allocate and initialize walker.
1883 w = kzalloc(sizeof(*w), GFP_ATOMIC);
1886 RT6_TRACE("dump<%p", w);
1887 w->root = &ip6_routing_table;
1888 w->func = fib6_dump_node;
1890 cb->args[0] = (long)w;
1891 read_lock_bh(&rt6_lock);
1893 read_unlock_bh(&rt6_lock);
1896 read_lock_bh(&rt6_lock);
1897 res = fib6_walk_continue(w);
1898 read_unlock_bh(&rt6_lock);
1901 if (res <= 0 && skb->len == 0)
1902 RT6_TRACE("%p>dump end\n", w);
1904 res = res < 0 ? res : skb->len;
	/* res < 0 is an error (really, impossible).
	   res == 0 means the dump is complete, but the skb may still contain data.
	   res > 0 means the dump is not complete, but the frame is full.
	 */
	/* Destroy the walker if the dump of this table is complete. */
1915 int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1917 struct rtattr **rta = arg;
1920 struct sk_buff *skb;
1922 struct rt6_info *rt;
1924 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
1931 skb->mac.raw = skb->data;
1932 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
1934 memset(&fl, 0, sizeof(fl));
1936 ipv6_addr_copy(&fl.fl6_src,
1937 (struct in6_addr*)RTA_DATA(rta[RTA_SRC-1]));
1939 ipv6_addr_copy(&fl.fl6_dst,
1940 (struct in6_addr*)RTA_DATA(rta[RTA_DST-1]));
1943 memcpy(&iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));
1946 struct net_device *dev;
1947 dev = __dev_get_by_index(iif);
1956 memcpy(&fl.oif, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));
1958 rt = (struct rt6_info*)ip6_route_output(NULL, &fl);
1960 skb->dst = &rt->u.dst;
1962 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
1963 err = rt6_fill_node(skb, rt,
1964 &fl.fl6_dst, &fl.fl6_src,
1966 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
1967 nlh->nlmsg_seq, 0, 0);
1973 err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1983 void inet6_rt_notify(int event, struct rt6_info *rt, struct nlmsghdr *nlh,
1984 struct netlink_skb_parms *req)
1986 struct sk_buff *skb;
1987 int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
1988 u32 pid = current->pid;
1994 seq = nlh->nlmsg_seq;
1996 skb = alloc_skb(size, gfp_any());
1998 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, ENOBUFS);
2001 if (rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0) < 0) {
2003 netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, EINVAL);
2006 NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_ROUTE;
2007 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_ROUTE, gfp_any());
2014 #ifdef CONFIG_PROC_FS
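/* One line of /proc/net/ipv6_route: destination (32 hex digits + prefix
 * length), source (32 + prefix length), nexthop (32 hex digits), then
 * metric, refcount, use count, flags and the device name.
 */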
2016 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2027 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2029 struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
2032 if (arg->skip < arg->offset / RT6_INFO_LEN) {
2037 if (arg->len >= arg->length)
2040 for (i=0; i<16; i++) {
2041 sprintf(arg->buffer + arg->len, "%02x",
2042 rt->rt6i_dst.addr.s6_addr[i]);
2045 arg->len += sprintf(arg->buffer + arg->len, " %02x ",
2048 #ifdef CONFIG_IPV6_SUBTREES
2049 for (i=0; i<16; i++) {
2050 sprintf(arg->buffer + arg->len, "%02x",
2051 rt->rt6i_src.addr.s6_addr[i]);
2054 arg->len += sprintf(arg->buffer + arg->len, " %02x ",
2057 sprintf(arg->buffer + arg->len,
2058 "00000000000000000000000000000000 00 ");
2062 if (rt->rt6i_nexthop) {
2063 for (i=0; i<16; i++) {
2064 sprintf(arg->buffer + arg->len, "%02x",
2065 rt->rt6i_nexthop->primary_key[i]);
2069 sprintf(arg->buffer + arg->len,
2070 "00000000000000000000000000000000");
2073 arg->len += sprintf(arg->buffer + arg->len,
2074 " %08x %08x %08x %08x %8s\n",
2075 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2076 rt->u.dst.__use, rt->rt6i_flags,
2077 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2081 static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
2083 struct rt6_proc_arg arg;
2084 arg.buffer = buffer;
2085 arg.offset = offset;
2086 arg.length = length;
2090 read_lock_bh(&rt6_lock);
2091 fib6_clean_tree(&ip6_routing_table, rt6_info_route, 0, &arg);
2092 read_unlock_bh(&rt6_lock);
2096 *start += offset % RT6_INFO_LEN;
2098 arg.len -= offset % RT6_INFO_LEN;
2100 if (arg.len > length)
2108 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2110 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2111 rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
2112 rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
2113 rt6_stats.fib_rt_cache,
2114 atomic_read(&ip6_dst_ops.entries),
2115 rt6_stats.fib_discarded_routes);
2120 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2122 return single_open(file, rt6_stats_seq_show, NULL);
2125 static struct file_operations rt6_stats_seq_fops = {
2126 .owner = THIS_MODULE,
2127 .open = rt6_stats_seq_open,
2129 .llseek = seq_lseek,
2130 .release = single_release,
2132 #endif /* CONFIG_PROC_FS */
2134 #ifdef CONFIG_SYSCTL
2136 static int flush_delay;
2139 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
2140 void __user *buffer, size_t *lenp, loff_t *ppos)
2143 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2144 fib6_run_gc(flush_delay <= 0 ? ~0UL : (unsigned long)flush_delay);
2150 ctl_table ipv6_route_table[] = {
2152 .ctl_name = NET_IPV6_ROUTE_FLUSH,
2153 .procname = "flush",
2154 .data = &flush_delay,
2155 .maxlen = sizeof(int),
2157 .proc_handler = &ipv6_sysctl_rtcache_flush
2160 .ctl_name = NET_IPV6_ROUTE_GC_THRESH,
2161 .procname = "gc_thresh",
2162 .data = &ip6_dst_ops.gc_thresh,
2163 .maxlen = sizeof(int),
2165 .proc_handler = &proc_dointvec,
2168 .ctl_name = NET_IPV6_ROUTE_MAX_SIZE,
2169 .procname = "max_size",
2170 .data = &ip6_rt_max_size,
2171 .maxlen = sizeof(int),
2173 .proc_handler = &proc_dointvec,
2176 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
2177 .procname = "gc_min_interval",
2178 .data = &ip6_rt_gc_min_interval,
2179 .maxlen = sizeof(int),
2181 .proc_handler = &proc_dointvec_jiffies,
2182 .strategy = &sysctl_jiffies,
2185 .ctl_name = NET_IPV6_ROUTE_GC_TIMEOUT,
2186 .procname = "gc_timeout",
2187 .data = &ip6_rt_gc_timeout,
2188 .maxlen = sizeof(int),
2190 .proc_handler = &proc_dointvec_jiffies,
2191 .strategy = &sysctl_jiffies,
2194 .ctl_name = NET_IPV6_ROUTE_GC_INTERVAL,
2195 .procname = "gc_interval",
2196 .data = &ip6_rt_gc_interval,
2197 .maxlen = sizeof(int),
2199 .proc_handler = &proc_dointvec_jiffies,
2200 .strategy = &sysctl_jiffies,
2203 .ctl_name = NET_IPV6_ROUTE_GC_ELASTICITY,
2204 .procname = "gc_elasticity",
2205 .data = &ip6_rt_gc_elasticity,
2206 .maxlen = sizeof(int),
2208 .proc_handler = &proc_dointvec_jiffies,
2209 .strategy = &sysctl_jiffies,
2212 .ctl_name = NET_IPV6_ROUTE_MTU_EXPIRES,
2213 .procname = "mtu_expires",
2214 .data = &ip6_rt_mtu_expires,
2215 .maxlen = sizeof(int),
2217 .proc_handler = &proc_dointvec_jiffies,
2218 .strategy = &sysctl_jiffies,
2221 .ctl_name = NET_IPV6_ROUTE_MIN_ADVMSS,
2222 .procname = "min_adv_mss",
2223 .data = &ip6_rt_min_advmss,
2224 .maxlen = sizeof(int),
2226 .proc_handler = &proc_dointvec_jiffies,
2227 .strategy = &sysctl_jiffies,
2230 .ctl_name = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
2231 .procname = "gc_min_interval_ms",
2232 .data = &ip6_rt_gc_min_interval,
2233 .maxlen = sizeof(int),
2235 .proc_handler = &proc_dointvec_ms_jiffies,
2236 .strategy = &sysctl_ms_jiffies,
2243 void __init ip6_route_init(void)
2245 struct proc_dir_entry *p;
2247 ip6_dst_ops.kmem_cachep = kmem_cache_create("ip6_dst_cache",
2248 sizeof(struct rt6_info),
2249 0, SLAB_HWCACHE_ALIGN,
2251 if (!ip6_dst_ops.kmem_cachep)
2252 panic("cannot create ip6_dst_cache");
2255 #ifdef CONFIG_PROC_FS
2256 p = proc_net_create("ipv6_route", 0, rt6_proc_info);
2258 p->owner = THIS_MODULE;
2260 proc_net_fops_create("rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2267 void ip6_route_cleanup(void)
2269 #ifdef CONFIG_PROC_FS
2270 proc_net_remove("ipv6_route");
2271 proc_net_remove("rt6_stats");
2278 kmem_cache_destroy(ip6_dst_ops.kmem_cachep);