2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * IPv4 Forwarding Information Base: semantics.
8 * Version: $Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
18 #include <asm/uaccess.h>
19 #include <asm/system.h>
20 #include <linux/bitops.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/jiffies.h>
25 #include <linux/string.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/errno.h>
30 #include <linux/inet.h>
31 #include <linux/inetdevice.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_arp.h>
34 #include <linux/proc_fs.h>
35 #include <linux/skbuff.h>
36 #include <linux/init.h>
40 #include <net/protocol.h>
41 #include <net/route.h>
44 #include <net/ip_fib.h>
45 #include <net/ip_mp_alg.h>
46 #include <net/netlink.h>
47 #include <net/nexthop.h>
49 #include "fib_lookup.h"
51 #define FSprintk(a...)
53 static DEFINE_SPINLOCK(fib_info_lock);
54 static struct hlist_head *fib_info_hash;
55 static struct hlist_head *fib_info_laddrhash;
56 static unsigned int fib_hash_size;
57 static unsigned int fib_info_cnt;
59 #define DEVINDEX_HASHBITS 8
60 #define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
61 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
63 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65 static DEFINE_SPINLOCK(fib_multipath_lock);
67 #define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
68 for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
70 #define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
71 for (nhsel=0, nh = (struct fib_nh*)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
73 #else /* CONFIG_IP_ROUTE_MULTIPATH */
75 /* Hope, that gcc will optimize it to get rid of dummy loop */
77 #define for_nexthops(fi) { int nhsel=0; const struct fib_nh * nh = (fi)->fib_nh; \
78 for (nhsel=0; nhsel < 1; nhsel++)
80 #define change_nexthops(fi) { int nhsel=0; struct fib_nh * nh = (struct fib_nh*)((fi)->fib_nh); \
81 for (nhsel=0; nhsel < 1; nhsel++)
83 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
85 #define endfor_nexthops(fi) }
92 } fib_props[RTN_MAX + 1] = {
95 .scope = RT_SCOPE_NOWHERE,
99 .scope = RT_SCOPE_UNIVERSE,
103 .scope = RT_SCOPE_HOST,
107 .scope = RT_SCOPE_LINK,
108 }, /* RTN_BROADCAST */
111 .scope = RT_SCOPE_LINK,
115 .scope = RT_SCOPE_UNIVERSE,
116 }, /* RTN_MULTICAST */
119 .scope = RT_SCOPE_UNIVERSE,
120 }, /* RTN_BLACKHOLE */
122 .error = -EHOSTUNREACH,
123 .scope = RT_SCOPE_UNIVERSE,
124 }, /* RTN_UNREACHABLE */
127 .scope = RT_SCOPE_UNIVERSE,
128 }, /* RTN_PROHIBIT */
131 .scope = RT_SCOPE_UNIVERSE,
135 .scope = RT_SCOPE_NOWHERE,
139 .scope = RT_SCOPE_NOWHERE,
140 }, /* RTN_XRESOLVE */
144 /* Release a nexthop info record */
146 void free_fib_info(struct fib_info *fi)
148 if (fi->fib_dead == 0) {
149 printk("Freeing alive fib_info %p\n", fi);
152 change_nexthops(fi) {
156 } endfor_nexthops(fi);
161 void fib_release_info(struct fib_info *fi)
163 spin_lock_bh(&fib_info_lock);
164 if (fi && --fi->fib_treeref == 0) {
165 hlist_del(&fi->fib_hash);
167 hlist_del(&fi->fib_lhash);
168 change_nexthops(fi) {
171 hlist_del(&nh->nh_hash);
172 } endfor_nexthops(fi)
176 spin_unlock_bh(&fib_info_lock);
179 static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
181 const struct fib_nh *onh = ofi->fib_nh;
184 if (nh->nh_oif != onh->nh_oif ||
185 nh->nh_gw != onh->nh_gw ||
186 nh->nh_scope != onh->nh_scope ||
187 #ifdef CONFIG_IP_ROUTE_MULTIPATH
188 nh->nh_weight != onh->nh_weight ||
190 #ifdef CONFIG_NET_CLS_ROUTE
191 nh->nh_tclassid != onh->nh_tclassid ||
193 ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD))
196 } endfor_nexthops(fi);
200 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
202 unsigned int mask = (fib_hash_size - 1);
203 unsigned int val = fi->fib_nhs;
205 val ^= fi->fib_protocol;
206 val ^= (__force u32)fi->fib_prefsrc;
207 val ^= fi->fib_priority;
209 return (val ^ (val >> 7) ^ (val >> 12)) & mask;
212 static struct fib_info *fib_find_info(const struct fib_info *nfi)
214 struct hlist_head *head;
215 struct hlist_node *node;
219 hash = fib_info_hashfn(nfi);
220 head = &fib_info_hash[hash];
222 hlist_for_each_entry(fi, node, head, fib_hash) {
223 if (fi->fib_nhs != nfi->fib_nhs)
225 if (nfi->fib_protocol == fi->fib_protocol &&
226 nfi->fib_prefsrc == fi->fib_prefsrc &&
227 nfi->fib_priority == fi->fib_priority &&
228 memcmp(nfi->fib_metrics, fi->fib_metrics,
229 sizeof(fi->fib_metrics)) == 0 &&
230 ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
231 (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
238 static inline unsigned int fib_devindex_hashfn(unsigned int val)
240 unsigned int mask = DEVINDEX_HASHSIZE - 1;
243 (val >> DEVINDEX_HASHBITS) ^
244 (val >> (DEVINDEX_HASHBITS * 2))) & mask;
247 /* Check, that the gateway is already configured.
248 Used only by redirect accept routine.
251 int ip_fib_check_default(__be32 gw, struct net_device *dev)
253 struct hlist_head *head;
254 struct hlist_node *node;
258 spin_lock(&fib_info_lock);
260 hash = fib_devindex_hashfn(dev->ifindex);
261 head = &fib_info_devhash[hash];
262 hlist_for_each_entry(nh, node, head, nh_hash) {
263 if (nh->nh_dev == dev &&
265 !(nh->nh_flags&RTNH_F_DEAD)) {
266 spin_unlock(&fib_info_lock);
271 spin_unlock(&fib_info_lock);
276 static inline size_t fib_nlmsg_size(struct fib_info *fi)
278 size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
279 + nla_total_size(4) /* RTA_TABLE */
280 + nla_total_size(4) /* RTA_DST */
281 + nla_total_size(4) /* RTA_PRIORITY */
282 + nla_total_size(4); /* RTA_PREFSRC */
284 /* space for nested metrics */
285 payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
288 /* Also handles the special case fib_nhs == 1 */
290 /* each nexthop is packed in an attribute */
291 size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
293 /* may contain flow and gateway attribute */
294 nhsize += 2 * nla_total_size(4);
296 /* all nexthops are packed in a nested attribute */
297 payload += nla_total_size(fi->fib_nhs * nhsize);
303 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
304 int dst_len, u32 tb_id, struct nl_info *info)
307 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
310 skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
314 err = fib_dump_info(skb, info->pid, seq, event, tb_id,
315 fa->fa_type, fa->fa_scope, key, dst_len,
316 fa->fa_tos, fa->fa_info, 0);
318 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
319 WARN_ON(err == -EMSGSIZE);
323 err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE,
324 info->nlh, GFP_KERNEL);
327 rtnl_set_sk_err(RTNLGRP_IPV4_ROUTE, err);
330 /* Return the first fib alias matching TOS with
331 * priority less than or equal to PRIO.
333 struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
336 struct fib_alias *fa;
337 list_for_each_entry(fa, fah, fa_list) {
338 if (fa->fa_tos > tos)
340 if (fa->fa_info->fib_priority >= prio ||
348 int fib_detect_death(struct fib_info *fi, int order,
349 struct fib_info **last_resort, int *last_idx, int *dflt)
352 int state = NUD_NONE;
354 n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
356 state = n->nud_state;
359 if (state==NUD_REACHABLE)
361 if ((state&NUD_VALID) && order != *dflt)
363 if ((state&NUD_VALID) ||
364 (*last_idx<0 && order > *dflt)) {
371 #ifdef CONFIG_IP_ROUTE_MULTIPATH
373 static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
377 while (rtnh_ok(rtnh, remaining)) {
379 rtnh = rtnh_next(rtnh, &remaining);
382 /* leftover implies invalid nexthop configuration, discard it */
383 return remaining > 0 ? 0 : nhs;
386 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
387 int remaining, struct fib_config *cfg)
389 change_nexthops(fi) {
392 if (!rtnh_ok(rtnh, remaining))
395 nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
396 nh->nh_oif = rtnh->rtnh_ifindex;
397 nh->nh_weight = rtnh->rtnh_hops + 1;
399 attrlen = rtnh_attrlen(rtnh);
401 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
403 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
404 nh->nh_gw = nla ? nla_get_be32(nla) : 0;
405 #ifdef CONFIG_NET_CLS_ROUTE
406 nla = nla_find(attrs, attrlen, RTA_FLOW);
407 nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
411 rtnh = rtnh_next(rtnh, &remaining);
412 } endfor_nexthops(fi);
419 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
421 #ifdef CONFIG_IP_ROUTE_MULTIPATH
422 struct rtnexthop *rtnh;
426 if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
429 if (cfg->fc_oif || cfg->fc_gw) {
430 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
431 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
436 #ifdef CONFIG_IP_ROUTE_MULTIPATH
437 if (cfg->fc_mp == NULL)
441 remaining = cfg->fc_mp_len;
446 if (!rtnh_ok(rtnh, remaining))
449 if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
452 attrlen = rtnh_attrlen(rtnh);
454 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
456 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
457 if (nla && nla_get_be32(nla) != nh->nh_gw)
459 #ifdef CONFIG_NET_CLS_ROUTE
460 nla = nla_find(attrs, attrlen, RTA_FLOW);
461 if (nla && nla_get_u32(nla) != nh->nh_tclassid)
466 rtnh = rtnh_next(rtnh, &remaining);
467 } endfor_nexthops(fi);
477 Semantics of nexthop is very messy for historical reasons.
478 We have to take into account, that:
479 a) gateway can be actually local interface address,
480 so that gatewayed route is direct.
481 b) gateway must be on-link address, possibly
482 described not by an ifaddr, but also by a direct route.
483 c) If both gateway and interface are specified, they should not
485 d) If we use tunnel routes, gateway could be not on-link.
487 Attempt to reconcile all of these (alas, self-contradictory) conditions
488 results in pretty ugly and hairy code with obscure logic.
490 I chose to generalize it instead, so that the size
491 of code does not increase practically, but it becomes
493 Every prefix is assigned a "scope" value: "host" is local address,
494 "link" is direct route,
495 [ ... "site" ... "interior" ... ]
496 and "universe" is true gateway route with global meaning.
498 Every prefix refers to a set of "nexthop"s (gw, oif),
499 where gw must have narrower scope. This recursion stops
500 when gw has LOCAL scope or if "nexthop" is declared ONLINK,
501 which means that gw is forced to be on link.
503 Code is still hairy, but now it is apparently logically
504 consistent and very flexible. F.e. as by-product it allows
505 to co-exist in peace independent exterior and interior
508 Normally it looks as following.
510 {universe prefix} -> (gw, oif) [scope link]
512 |-> {link prefix} -> (gw, oif) [scope local]
514 |-> {local prefix} (terminal node)
517 static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
523 struct fib_result res;
525 #ifdef CONFIG_IP_ROUTE_PERVASIVE
526 if (nh->nh_flags&RTNH_F_PERVASIVE)
529 if (nh->nh_flags&RTNH_F_ONLINK) {
530 struct net_device *dev;
532 if (cfg->fc_scope >= RT_SCOPE_LINK)
534 if (inet_addr_type(nh->nh_gw) != RTN_UNICAST)
536 if ((dev = __dev_get_by_index(nh->nh_oif)) == NULL)
538 if (!(dev->flags&IFF_UP))
542 nh->nh_scope = RT_SCOPE_LINK;
550 .scope = cfg->fc_scope + 1,
556 /* It is not necessary, but requires a bit of thinking */
557 if (fl.fl4_scope < RT_SCOPE_LINK)
558 fl.fl4_scope = RT_SCOPE_LINK;
559 if ((err = fib_lookup(&fl, &res)) != 0)
563 if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
565 nh->nh_scope = res.scope;
566 nh->nh_oif = FIB_RES_OIF(res);
567 if ((nh->nh_dev = FIB_RES_DEV(res)) == NULL)
569 dev_hold(nh->nh_dev);
571 if (!(nh->nh_dev->flags & IFF_UP))
578 struct in_device *in_dev;
580 if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
583 in_dev = inetdev_by_index(nh->nh_oif);
586 if (!(in_dev->dev->flags&IFF_UP)) {
590 nh->nh_dev = in_dev->dev;
591 dev_hold(nh->nh_dev);
592 nh->nh_scope = RT_SCOPE_HOST;
598 static inline unsigned int fib_laddr_hashfn(__be32 val)
600 unsigned int mask = (fib_hash_size - 1);
602 return ((__force u32)val ^ ((__force u32)val >> 7) ^ ((__force u32)val >> 14)) & mask;
605 static struct hlist_head *fib_hash_alloc(int bytes)
607 if (bytes <= PAGE_SIZE)
608 return kmalloc(bytes, GFP_KERNEL);
610 return (struct hlist_head *)
611 __get_free_pages(GFP_KERNEL, get_order(bytes));
614 static void fib_hash_free(struct hlist_head *hash, int bytes)
619 if (bytes <= PAGE_SIZE)
622 free_pages((unsigned long) hash, get_order(bytes));
625 static void fib_hash_move(struct hlist_head *new_info_hash,
626 struct hlist_head *new_laddrhash,
627 unsigned int new_size)
629 struct hlist_head *old_info_hash, *old_laddrhash;
630 unsigned int old_size = fib_hash_size;
631 unsigned int i, bytes;
633 spin_lock_bh(&fib_info_lock);
634 old_info_hash = fib_info_hash;
635 old_laddrhash = fib_info_laddrhash;
636 fib_hash_size = new_size;
638 for (i = 0; i < old_size; i++) {
639 struct hlist_head *head = &fib_info_hash[i];
640 struct hlist_node *node, *n;
643 hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
644 struct hlist_head *dest;
645 unsigned int new_hash;
647 hlist_del(&fi->fib_hash);
649 new_hash = fib_info_hashfn(fi);
650 dest = &new_info_hash[new_hash];
651 hlist_add_head(&fi->fib_hash, dest);
654 fib_info_hash = new_info_hash;
656 for (i = 0; i < old_size; i++) {
657 struct hlist_head *lhead = &fib_info_laddrhash[i];
658 struct hlist_node *node, *n;
661 hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
662 struct hlist_head *ldest;
663 unsigned int new_hash;
665 hlist_del(&fi->fib_lhash);
667 new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
668 ldest = &new_laddrhash[new_hash];
669 hlist_add_head(&fi->fib_lhash, ldest);
672 fib_info_laddrhash = new_laddrhash;
674 spin_unlock_bh(&fib_info_lock);
676 bytes = old_size * sizeof(struct hlist_head *);
677 fib_hash_free(old_info_hash, bytes);
678 fib_hash_free(old_laddrhash, bytes);
681 struct fib_info *fib_create_info(struct fib_config *cfg)
684 struct fib_info *fi = NULL;
685 struct fib_info *ofi;
688 /* Fast check to catch the most weird cases */
689 if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
692 #ifdef CONFIG_IP_ROUTE_MULTIPATH
694 nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
699 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
700 if (cfg->fc_mp_alg) {
701 if (cfg->fc_mp_alg < IP_MP_ALG_NONE ||
702 cfg->fc_mp_alg > IP_MP_ALG_MAX)
708 if (fib_info_cnt >= fib_hash_size) {
709 unsigned int new_size = fib_hash_size << 1;
710 struct hlist_head *new_info_hash;
711 struct hlist_head *new_laddrhash;
716 bytes = new_size * sizeof(struct hlist_head *);
717 new_info_hash = fib_hash_alloc(bytes);
718 new_laddrhash = fib_hash_alloc(bytes);
719 if (!new_info_hash || !new_laddrhash) {
720 fib_hash_free(new_info_hash, bytes);
721 fib_hash_free(new_laddrhash, bytes);
723 memset(new_info_hash, 0, bytes);
724 memset(new_laddrhash, 0, bytes);
726 fib_hash_move(new_info_hash, new_laddrhash, new_size);
733 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
738 fi->fib_protocol = cfg->fc_protocol;
739 fi->fib_flags = cfg->fc_flags;
740 fi->fib_priority = cfg->fc_priority;
741 fi->fib_prefsrc = cfg->fc_prefsrc;
744 change_nexthops(fi) {
746 } endfor_nexthops(fi)
752 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
753 int type = nla->nla_type;
758 fi->fib_metrics[type - 1] = nla_get_u32(nla);
764 #ifdef CONFIG_IP_ROUTE_MULTIPATH
765 err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
768 if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
770 if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
772 #ifdef CONFIG_NET_CLS_ROUTE
773 if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
780 struct fib_nh *nh = fi->fib_nh;
782 nh->nh_oif = cfg->fc_oif;
783 nh->nh_gw = cfg->fc_gw;
784 nh->nh_flags = cfg->fc_flags;
785 #ifdef CONFIG_NET_CLS_ROUTE
786 nh->nh_tclassid = cfg->fc_flow;
788 #ifdef CONFIG_IP_ROUTE_MULTIPATH
793 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
794 fi->fib_mp_alg = cfg->fc_mp_alg;
797 if (fib_props[cfg->fc_type].error) {
798 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
803 if (cfg->fc_scope > RT_SCOPE_HOST)
806 if (cfg->fc_scope == RT_SCOPE_HOST) {
807 struct fib_nh *nh = fi->fib_nh;
809 /* Local address is added. */
810 if (nhs != 1 || nh->nh_gw)
812 nh->nh_scope = RT_SCOPE_NOWHERE;
813 nh->nh_dev = dev_get_by_index(fi->fib_nh->nh_oif);
815 if (nh->nh_dev == NULL)
818 change_nexthops(fi) {
819 if ((err = fib_check_nh(cfg, fi, nh)) != 0)
821 } endfor_nexthops(fi)
824 if (fi->fib_prefsrc) {
825 if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
826 fi->fib_prefsrc != cfg->fc_dst)
827 if (inet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
832 if ((ofi = fib_find_info(fi)) != NULL) {
840 atomic_inc(&fi->fib_clntref);
841 spin_lock_bh(&fib_info_lock);
842 hlist_add_head(&fi->fib_hash,
843 &fib_info_hash[fib_info_hashfn(fi)]);
844 if (fi->fib_prefsrc) {
845 struct hlist_head *head;
847 head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
848 hlist_add_head(&fi->fib_lhash, head);
850 change_nexthops(fi) {
851 struct hlist_head *head;
856 hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
857 head = &fib_info_devhash[hash];
858 hlist_add_head(&nh->nh_hash, head);
859 } endfor_nexthops(fi)
860 spin_unlock_bh(&fib_info_lock);
875 /* Note! fib_semantic_match intentionally uses RCU list functions. */
876 int fib_semantic_match(struct list_head *head, const struct flowi *flp,
877 struct fib_result *res, __be32 zone, __be32 mask,
880 struct fib_alias *fa;
883 list_for_each_entry_rcu(fa, head, fa_list) {
887 fa->fa_tos != flp->fl4_tos)
890 if (fa->fa_scope < flp->fl4_scope)
893 fa->fa_state |= FA_S_ACCESSED;
895 err = fib_props[fa->fa_type].error;
897 struct fib_info *fi = fa->fa_info;
899 if (fi->fib_flags & RTNH_F_DEAD)
902 switch (fa->fa_type) {
909 if (nh->nh_flags&RTNH_F_DEAD)
911 if (!flp->oif || flp->oif == nh->nh_oif)
914 #ifdef CONFIG_IP_ROUTE_MULTIPATH
915 if (nhsel < fi->fib_nhs) {
928 printk(KERN_DEBUG "impossible 102\n");
937 res->prefixlen = prefixlen;
938 res->nh_sel = nh_sel;
939 res->type = fa->fa_type;
940 res->scope = fa->fa_scope;
941 res->fi = fa->fa_info;
942 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
944 res->network = zone & inet_make_mask(prefixlen);
946 atomic_inc(&res->fi->fib_clntref);
950 /* Find appropriate source address to this destination */
952 __be32 __fib_res_prefsrc(struct fib_result *res)
954 return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
957 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
958 u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
959 struct fib_info *fi, unsigned int flags)
961 struct nlmsghdr *nlh;
964 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
968 rtm = nlmsg_data(nlh);
969 rtm->rtm_family = AF_INET;
970 rtm->rtm_dst_len = dst_len;
971 rtm->rtm_src_len = 0;
973 rtm->rtm_table = tb_id;
974 NLA_PUT_U32(skb, RTA_TABLE, tb_id);
975 rtm->rtm_type = type;
976 rtm->rtm_flags = fi->fib_flags;
977 rtm->rtm_scope = scope;
978 rtm->rtm_protocol = fi->fib_protocol;
980 if (rtm->rtm_dst_len)
981 NLA_PUT_BE32(skb, RTA_DST, dst);
983 if (fi->fib_priority)
984 NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority);
986 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
987 goto nla_put_failure;
990 NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc);
992 if (fi->fib_nhs == 1) {
993 if (fi->fib_nh->nh_gw)
994 NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw);
996 if (fi->fib_nh->nh_oif)
997 NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
998 #ifdef CONFIG_NET_CLS_ROUTE
999 if (fi->fib_nh[0].nh_tclassid)
1000 NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
1003 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1004 if (fi->fib_nhs > 1) {
1005 struct rtnexthop *rtnh;
1008 mp = nla_nest_start(skb, RTA_MULTIPATH);
1010 goto nla_put_failure;
1013 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
1015 goto nla_put_failure;
1017 rtnh->rtnh_flags = nh->nh_flags & 0xFF;
1018 rtnh->rtnh_hops = nh->nh_weight - 1;
1019 rtnh->rtnh_ifindex = nh->nh_oif;
1022 NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
1023 #ifdef CONFIG_NET_CLS_ROUTE
1024 if (nh->nh_tclassid)
1025 NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
1027 /* length of rtnetlink header + attributes */
1028 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
1029 } endfor_nexthops(fi);
1031 nla_nest_end(skb, mp);
1034 return nlmsg_end(skb, nlh);
1037 nlmsg_cancel(skb, nlh);
1043 - local address disappeared -> we must delete all the entries
1045 - device went down -> we must shutdown all nexthops going via it.
1048 int fib_sync_down(__be32 local, struct net_device *dev, int force)
1051 int scope = RT_SCOPE_NOWHERE;
1056 if (local && fib_info_laddrhash) {
1057 unsigned int hash = fib_laddr_hashfn(local);
1058 struct hlist_head *head = &fib_info_laddrhash[hash];
1059 struct hlist_node *node;
1060 struct fib_info *fi;
1062 hlist_for_each_entry(fi, node, head, fib_lhash) {
1063 if (fi->fib_prefsrc == local) {
1064 fi->fib_flags |= RTNH_F_DEAD;
1071 struct fib_info *prev_fi = NULL;
1072 unsigned int hash = fib_devindex_hashfn(dev->ifindex);
1073 struct hlist_head *head = &fib_info_devhash[hash];
1074 struct hlist_node *node;
1077 hlist_for_each_entry(nh, node, head, nh_hash) {
1078 struct fib_info *fi = nh->nh_parent;
1081 BUG_ON(!fi->fib_nhs);
1082 if (nh->nh_dev != dev || fi == prev_fi)
1086 change_nexthops(fi) {
1087 if (nh->nh_flags&RTNH_F_DEAD)
1089 else if (nh->nh_dev == dev &&
1090 nh->nh_scope != scope) {
1091 nh->nh_flags |= RTNH_F_DEAD;
1092 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1093 spin_lock_bh(&fib_multipath_lock);
1094 fi->fib_power -= nh->nh_power;
1096 spin_unlock_bh(&fib_multipath_lock);
1100 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1101 if (force > 1 && nh->nh_dev == dev) {
1106 } endfor_nexthops(fi)
1107 if (dead == fi->fib_nhs) {
1108 fi->fib_flags |= RTNH_F_DEAD;
1117 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1120 Dead device goes up. We wake up dead nexthops.
1121 It makes sense only on multipath routes.
1124 int fib_sync_up(struct net_device *dev)
1126 struct fib_info *prev_fi;
1128 struct hlist_head *head;
1129 struct hlist_node *node;
1133 if (!(dev->flags&IFF_UP))
1137 hash = fib_devindex_hashfn(dev->ifindex);
1138 head = &fib_info_devhash[hash];
1141 hlist_for_each_entry(nh, node, head, nh_hash) {
1142 struct fib_info *fi = nh->nh_parent;
1145 BUG_ON(!fi->fib_nhs);
1146 if (nh->nh_dev != dev || fi == prev_fi)
1151 change_nexthops(fi) {
1152 if (!(nh->nh_flags&RTNH_F_DEAD)) {
1156 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
1158 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
1161 spin_lock_bh(&fib_multipath_lock);
1163 nh->nh_flags &= ~RTNH_F_DEAD;
1164 spin_unlock_bh(&fib_multipath_lock);
1165 } endfor_nexthops(fi)
1168 fi->fib_flags &= ~RTNH_F_DEAD;
1177 The algorithm is suboptimal, but it provides really
1178 fair weighted route distribution.
1181 void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
1183 struct fib_info *fi = res->fi;
1186 spin_lock_bh(&fib_multipath_lock);
1187 if (fi->fib_power <= 0) {
1189 change_nexthops(fi) {
1190 if (!(nh->nh_flags&RTNH_F_DEAD)) {
1191 power += nh->nh_weight;
1192 nh->nh_power = nh->nh_weight;
1194 } endfor_nexthops(fi);
1195 fi->fib_power = power;
1197 spin_unlock_bh(&fib_multipath_lock);
1198 /* Race condition: route has just become dead. */
1205 /* w should be random number [0..fi->fib_power-1],
1206 it is pretty bad approximation.
1209 w = jiffies % fi->fib_power;
1211 change_nexthops(fi) {
1212 if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
1213 if ((w -= nh->nh_power) <= 0) {
1216 res->nh_sel = nhsel;
1217 spin_unlock_bh(&fib_multipath_lock);
1221 } endfor_nexthops(fi);
1223 /* Race condition: route has just become dead. */
1225 spin_unlock_bh(&fib_multipath_lock);