2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work comes from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.408"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
/* Depth limit for the per-depth statistics array in struct trie_stat. */
#define MAX_STAT_DEPTH 32

#define KEYLENGTH (8*sizeof(t_key))

typedef unsigned int t_key;

/*
 * The low bit of ->parent encodes the node type (internal tnode vs.
 * leaf); the remaining bits hold the actual parent pointer.
 * NOTE(review): the T_TNODE/T_LEAF definitions were lost in extraction
 * and restored here from upstream fib_trie.c -- verify against VCS.
 */
#define T_TNODE 0
#define T_LEAF  1
#define NODE_TYPE_MASK	0x1UL
#define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)

/* Argument parenthesized so non-trivial expressions expand safely. */
#define IS_TNODE(n) (!((n)->parent & T_LEAF))
#define IS_LEAF(n) ((n)->parent & T_LEAF)
100 unsigned long parent;
105 unsigned long parent;
107 struct hlist_head list;
112 struct hlist_node hlist;
115 struct list_head falh;
119 unsigned long parent;
121 unsigned char pos; /* 2log(KEYLENGTH) bits needed */
122 unsigned char bits; /* 2log(KEYLENGTH) bits needed */
123 unsigned int full_children; /* KEYLENGTH bits needed */
124 unsigned int empty_children; /* KEYLENGTH bits needed */
126 struct node *child[0];
129 #ifdef CONFIG_IP_FIB_TRIE_STATS
130 struct trie_use_stats {
132 unsigned int backtrack;
133 unsigned int semantic_match_passed;
134 unsigned int semantic_match_miss;
135 unsigned int null_node_hit;
136 unsigned int resize_node_skipped;
141 unsigned int totdepth;
142 unsigned int maxdepth;
145 unsigned int nullpointers;
146 unsigned int prefixes;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
157 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
158 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
160 static struct node *resize(struct trie *t, struct tnode *tn);
161 static struct tnode *inflate(struct trie *t, struct tnode *tn);
162 static struct tnode *halve(struct trie *t, struct tnode *tn);
163 static void tnode_free(struct tnode *tn);
165 static struct kmem_cache *fn_alias_kmem __read_mostly;
166 static struct kmem_cache *trie_leaf_kmem __read_mostly;
168 static inline struct tnode *node_parent(struct node *node)
170 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
/* RCU-reader variant of node_parent(). */
static inline struct tnode *node_parent_rcu(struct node *node)
{
	struct tnode *ret = node_parent(node);

	return rcu_dereference(ret);
}
180 /* Same as rcu_assign_pointer
181 * but that macro() assumes that value is a pointer.
183 static inline void node_set_parent(struct node *node, struct tnode *ptr)
186 node->parent = (unsigned long)ptr | NODE_TYPE(node);
189 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
191 BUG_ON(i >= 1U << tn->bits);
/* RCU-reader variant of tnode_get_child(). */
static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
	struct node *ret = tnode_get_child(tn, i);

	return rcu_dereference(ret);
}
203 static inline int tnode_child_length(const struct tnode *tn)
205 return 1 << tn->bits;
208 static inline t_key mask_pfx(t_key k, unsigned short l)
210 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
213 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
215 if (offset < KEYLENGTH)
216 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
221 static inline int tkey_equals(t_key a, t_key b)
226 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
228 if (bits == 0 || offset >= KEYLENGTH)
230 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
231 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
234 static inline int tkey_mismatch(t_key a, int offset, t_key b)
241 while ((diff << i) >> (KEYLENGTH-1) == 0)
247 To understand this stuff, an understanding of keys and all their bits is
248 necessary. Every node in the trie has a key associated with it, but not
249 all of the bits in that key are significant.
251 Consider a node 'n' and its parent 'tp'.
253 If n is a leaf, every bit in its key is significant. Its presence is
254 necessitated by path compression, since during a tree traversal (when
255 searching for a leaf - unless we are doing an insertion) we will completely
256 ignore all skipped bits we encounter. Thus we need to verify, at the end of
257 a potentially successful search, that we have indeed been walking the
260 Note that we can never "miss" the correct key in the tree if present by
261 following the wrong path. Path compression ensures that segments of the key
262 that are the same for all keys with a given prefix are skipped, but the
263 skipped part *is* identical for each node in the subtrie below the skipped
264 bit! trie_insert() in this implementation takes care of that - note the
265 call to tkey_sub_equals() in trie_insert().
267 if n is an internal node - a 'tnode' here, the various parts of its key
268 have many different meanings.
271 _________________________________________________________________
272 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
273 -----------------------------------------------------------------
274 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
276 _________________________________________________________________
277 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
278 -----------------------------------------------------------------
279 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
286 First, let's just ignore the bits that come before the parent tp, that is
287 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
288 not use them for anything.
290 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
291 index into the parent's child array. That is, they will be used to find
292 'n' among tp's children.
294 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
297 All the bits we have seen so far are significant to the node n. The rest
298 of the bits are really not needed or indeed known in n->key.
300 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
301 n's child array, and will of course be different for each child.
304 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
309 static inline void check_tnode(const struct tnode *tn)
311 WARN_ON(tn && tn->pos+tn->bits > 32);
/*
 * Resize thresholds, in percent of the child array: a node is halved
 * when usage drops below halve_threshold, and inflated when the
 * *doubled* node would still be at least inflate_threshold full.
 * The root gets laxer limits so it stays large (better fan-out).
 */
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 8;
static const int inflate_threshold_root = 15;
320 static void __alias_free_mem(struct rcu_head *head)
322 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
323 kmem_cache_free(fn_alias_kmem, fa);
326 static inline void alias_free_mem_rcu(struct fib_alias *fa)
328 call_rcu(&fa->rcu, __alias_free_mem);
331 static void __leaf_free_rcu(struct rcu_head *head)
333 struct leaf *l = container_of(head, struct leaf, rcu);
334 kmem_cache_free(trie_leaf_kmem, l);
337 static void __leaf_info_free_rcu(struct rcu_head *head)
339 kfree(container_of(head, struct leaf_info, rcu));
342 static inline void free_leaf_info(struct leaf_info *leaf)
344 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
347 static struct tnode *tnode_alloc(size_t size)
351 if (size <= PAGE_SIZE)
352 return kzalloc(size, GFP_KERNEL);
354 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
358 return page_address(pages);
361 static void __tnode_free_rcu(struct rcu_head *head)
363 struct tnode *tn = container_of(head, struct tnode, rcu);
364 size_t size = sizeof(struct tnode) +
365 (sizeof(struct node *) << tn->bits);
367 if (size <= PAGE_SIZE)
370 free_pages((unsigned long)tn, get_order(size));
373 static inline void tnode_free(struct tnode *tn)
376 struct leaf *l = (struct leaf *) tn;
377 call_rcu_bh(&l->rcu, __leaf_free_rcu);
379 call_rcu(&tn->rcu, __tnode_free_rcu);
382 static struct leaf *leaf_new(void)
384 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
387 INIT_HLIST_HEAD(&l->list);
392 static struct leaf_info *leaf_info_new(int plen)
394 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
397 INIT_LIST_HEAD(&li->falh);
402 static struct tnode *tnode_new(t_key key, int pos, int bits)
404 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
405 struct tnode *tn = tnode_alloc(sz);
408 tn->parent = T_TNODE;
412 tn->full_children = 0;
413 tn->empty_children = 1<<bits;
416 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
417 (unsigned long) (sizeof(struct node) << bits));
422 * Check whether a tnode 'n' is "full", i.e. it is an internal node
423 * and no bits are skipped. See discussion in dyntree paper p. 6
426 static inline int tnode_full(const struct tnode *tn, const struct node *n)
428 if (n == NULL || IS_LEAF(n))
431 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
/* Store @n in slot @i of @tn; fullness is recomputed (wasfull = -1). */
static inline void put_child(struct trie *t, struct tnode *tn, int i,
			     struct node *n)
{
	tnode_put_child_reorg(tn, i, n, -1);
}
441 * Add a child at position i overwriting the old value.
442 * Update the value of full_children and empty_children.
445 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
448 struct node *chi = tn->child[i];
451 BUG_ON(i >= 1<<tn->bits);
453 /* update emptyChildren */
454 if (n == NULL && chi != NULL)
455 tn->empty_children++;
456 else if (n != NULL && chi == NULL)
457 tn->empty_children--;
459 /* update fullChildren */
461 wasfull = tnode_full(tn, chi);
463 isfull = tnode_full(tn, n);
464 if (wasfull && !isfull)
466 else if (!wasfull && isfull)
470 node_set_parent(n, tn);
472 rcu_assign_pointer(tn->child[i], n);
475 static struct node *resize(struct trie *t, struct tnode *tn)
479 struct tnode *old_tn;
480 int inflate_threshold_use;
481 int halve_threshold_use;
487 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
488 tn, inflate_threshold, halve_threshold);
491 if (tn->empty_children == tnode_child_length(tn)) {
496 if (tn->empty_children == tnode_child_length(tn) - 1)
497 for (i = 0; i < tnode_child_length(tn); i++) {
504 /* compress one level */
505 node_set_parent(n, NULL);
510 * Double as long as the resulting node has a number of
511 * nonempty nodes that are above the threshold.
515 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
516 * the Helsinki University of Technology and Matti Tikkanen of Nokia
517 * Telecommunications, page 6:
518 * "A node is doubled if the ratio of non-empty children to all
519 * children in the *doubled* node is at least 'high'."
521 * 'high' in this instance is the variable 'inflate_threshold'. It
522 * is expressed as a percentage, so we multiply it with
523 * tnode_child_length() and instead of multiplying by 2 (since the
524 * child array will be doubled by inflate()) and multiplying
525 * the left-hand side by 100 (to handle the percentage thing) we
526 * multiply the left-hand side by 50.
528 * The left-hand side may look a bit weird: tnode_child_length(tn)
529 * - tn->empty_children is of course the number of non-null children
530 * in the current node. tn->full_children is the number of "full"
531 * children, that is non-null tnodes with a skip value of 0.
532 * All of those will be doubled in the resulting inflated tnode, so
533 * we just count them one extra time here.
535 * A clearer way to write this would be:
537 * to_be_doubled = tn->full_children;
538 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
541 * new_child_length = tnode_child_length(tn) * 2;
543 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
545 * if (new_fill_factor >= inflate_threshold)
547 * ...and so on, tho it would mess up the while () loop.
550 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
554 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
555 * inflate_threshold * new_child_length
557 * expand not_to_be_doubled and to_be_doubled, and shorten:
558 * 100 * (tnode_child_length(tn) - tn->empty_children +
559 * tn->full_children) >= inflate_threshold * new_child_length
561 * expand new_child_length:
562 * 100 * (tnode_child_length(tn) - tn->empty_children +
563 * tn->full_children) >=
564 * inflate_threshold * tnode_child_length(tn) * 2
567 * 50 * (tn->full_children + tnode_child_length(tn) -
568 * tn->empty_children) >= inflate_threshold *
569 * tnode_child_length(tn)
575 /* Keep root node larger */
578 inflate_threshold_use = inflate_threshold_root;
580 inflate_threshold_use = inflate_threshold;
584 while ((tn->full_children > 0 && max_resize-- &&
585 50 * (tn->full_children + tnode_child_length(tn)
586 - tn->empty_children)
587 >= inflate_threshold_use * tnode_child_length(tn))) {
594 #ifdef CONFIG_IP_FIB_TRIE_STATS
595 t->stats.resize_node_skipped++;
601 if (max_resize < 0) {
603 pr_warning("Fix inflate_threshold_root."
604 " Now=%d size=%d bits\n",
605 inflate_threshold_root, tn->bits);
607 pr_warning("Fix inflate_threshold."
608 " Now=%d size=%d bits\n",
609 inflate_threshold, tn->bits);
615 * Halve as long as the number of empty children in this
616 * node is above threshold.
620 /* Keep root node larger */
623 halve_threshold_use = halve_threshold_root;
625 halve_threshold_use = halve_threshold;
629 while (tn->bits > 1 && max_resize-- &&
630 100 * (tnode_child_length(tn) - tn->empty_children) <
631 halve_threshold_use * tnode_child_length(tn)) {
637 #ifdef CONFIG_IP_FIB_TRIE_STATS
638 t->stats.resize_node_skipped++;
644 if (max_resize < 0) {
646 pr_warning("Fix halve_threshold_root."
647 " Now=%d size=%d bits\n",
648 halve_threshold_root, tn->bits);
650 pr_warning("Fix halve_threshold."
651 " Now=%d size=%d bits\n",
652 halve_threshold, tn->bits);
655 /* Only one child remains */
656 if (tn->empty_children == tnode_child_length(tn) - 1)
657 for (i = 0; i < tnode_child_length(tn); i++) {
664 /* compress one level */
666 node_set_parent(n, NULL);
671 return (struct node *) tn;
674 static struct tnode *inflate(struct trie *t, struct tnode *tn)
676 struct tnode *oldtnode = tn;
677 int olen = tnode_child_length(tn);
680 pr_debug("In inflate\n");
682 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
685 return ERR_PTR(-ENOMEM);
688 * Preallocate and store tnodes before the actual work so we
689 * don't get into an inconsistent state if memory allocation
690 * fails. In case of failure we return the oldnode and inflate
691 * of tnode is ignored.
694 for (i = 0; i < olen; i++) {
697 inode = (struct tnode *) tnode_get_child(oldtnode, i);
700 inode->pos == oldtnode->pos + oldtnode->bits &&
702 struct tnode *left, *right;
703 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
705 left = tnode_new(inode->key&(~m), inode->pos + 1,
710 right = tnode_new(inode->key|m, inode->pos + 1,
718 put_child(t, tn, 2*i, (struct node *) left);
719 put_child(t, tn, 2*i+1, (struct node *) right);
723 for (i = 0; i < olen; i++) {
725 struct node *node = tnode_get_child(oldtnode, i);
726 struct tnode *left, *right;
733 /* A leaf or an internal node with skipped bits */
735 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
736 tn->pos + tn->bits - 1) {
737 if (tkey_extract_bits(node->key,
738 oldtnode->pos + oldtnode->bits,
740 put_child(t, tn, 2*i, node);
742 put_child(t, tn, 2*i+1, node);
746 /* An internal node with two children */
747 inode = (struct tnode *) node;
749 if (inode->bits == 1) {
750 put_child(t, tn, 2*i, inode->child[0]);
751 put_child(t, tn, 2*i+1, inode->child[1]);
757 /* An internal node with more than two children */
759 /* We will replace this node 'inode' with two new
760 * ones, 'left' and 'right', each with half of the
761 * original children. The two new nodes will have
762 * a position one bit further down the key and this
763 * means that the "significant" part of their keys
764 * (see the discussion near the top of this file)
765 * will differ by one bit, which will be "0" in
766 * left's key and "1" in right's key. Since we are
767 * moving the key position by one step, the bit that
768 * we are moving away from - the bit at position
769 * (inode->pos) - is the one that will differ between
770 * left and right. So... we synthesize that bit in the
772 * The mask 'm' below will be a single "one" bit at
773 * the position (inode->pos)
776 /* Use the old key, but set the new significant
780 left = (struct tnode *) tnode_get_child(tn, 2*i);
781 put_child(t, tn, 2*i, NULL);
785 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
786 put_child(t, tn, 2*i+1, NULL);
790 size = tnode_child_length(left);
791 for (j = 0; j < size; j++) {
792 put_child(t, left, j, inode->child[j]);
793 put_child(t, right, j, inode->child[j + size]);
795 put_child(t, tn, 2*i, resize(t, left));
796 put_child(t, tn, 2*i+1, resize(t, right));
800 tnode_free(oldtnode);
804 int size = tnode_child_length(tn);
807 for (j = 0; j < size; j++)
809 tnode_free((struct tnode *)tn->child[j]);
813 return ERR_PTR(-ENOMEM);
817 static struct tnode *halve(struct trie *t, struct tnode *tn)
819 struct tnode *oldtnode = tn;
820 struct node *left, *right;
822 int olen = tnode_child_length(tn);
824 pr_debug("In halve\n");
826 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
829 return ERR_PTR(-ENOMEM);
832 * Preallocate and store tnodes before the actual work so we
833 * don't get into an inconsistent state if memory allocation
834 * fails. In case of failure we return the oldnode and halve
835 * of tnode is ignored.
838 for (i = 0; i < olen; i += 2) {
839 left = tnode_get_child(oldtnode, i);
840 right = tnode_get_child(oldtnode, i+1);
842 /* Two nonempty children */
846 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
851 put_child(t, tn, i/2, (struct node *)newn);
856 for (i = 0; i < olen; i += 2) {
857 struct tnode *newBinNode;
859 left = tnode_get_child(oldtnode, i);
860 right = tnode_get_child(oldtnode, i+1);
862 /* At least one of the children is empty */
864 if (right == NULL) /* Both are empty */
866 put_child(t, tn, i/2, right);
871 put_child(t, tn, i/2, left);
875 /* Two nonempty children */
876 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
877 put_child(t, tn, i/2, NULL);
878 put_child(t, newBinNode, 0, left);
879 put_child(t, newBinNode, 1, right);
880 put_child(t, tn, i/2, resize(t, newBinNode));
882 tnode_free(oldtnode);
886 int size = tnode_child_length(tn);
889 for (j = 0; j < size; j++)
891 tnode_free((struct tnode *)tn->child[j]);
895 return ERR_PTR(-ENOMEM);
899 /* readside must use rcu_read_lock currently dump routines
900 via get_fa_head and dump */
902 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
904 struct hlist_head *head = &l->list;
905 struct hlist_node *node;
906 struct leaf_info *li;
908 hlist_for_each_entry_rcu(li, node, head, hlist)
909 if (li->plen == plen)
915 static inline struct list_head *get_fa_head(struct leaf *l, int plen)
917 struct leaf_info *li = find_leaf_info(l, plen);
925 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
927 struct leaf_info *li = NULL, *last = NULL;
928 struct hlist_node *node;
930 if (hlist_empty(head)) {
931 hlist_add_head_rcu(&new->hlist, head);
933 hlist_for_each_entry(li, node, head, hlist) {
934 if (new->plen > li->plen)
940 hlist_add_after_rcu(&last->hlist, &new->hlist);
942 hlist_add_before_rcu(&new->hlist, &li->hlist);
946 /* rcu_read_lock needs to be hold by caller from readside */
949 fib_find_node(struct trie *t, u32 key)
956 n = rcu_dereference(t->trie);
958 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
959 tn = (struct tnode *) n;
963 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
964 pos = tn->pos + tn->bits;
965 n = tnode_get_child_rcu(tn,
966 tkey_extract_bits(key,
972 /* Case we have found a leaf. Compare prefixes */
974 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
975 return (struct leaf *)n;
980 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
983 t_key cindex, key = tn->key;
986 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
987 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
988 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
989 tn = (struct tnode *) resize(t, (struct tnode *)tn);
991 tnode_put_child_reorg((struct tnode *)tp, cindex,
992 (struct node *)tn, wasfull);
994 tp = node_parent((struct node *) tn);
1000 /* Handle last (top) tnode */
1002 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1004 return (struct node *)tn;
1007 /* only used from updater-side */
1009 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1012 struct tnode *tp = NULL, *tn = NULL;
1016 struct list_head *fa_head = NULL;
1017 struct leaf_info *li;
1023 /* If we point to NULL, stop. Either the tree is empty and we should
1024 * just put a new leaf in if, or we have reached an empty child slot,
1025 * and we should just put our new leaf in that.
1026 * If we point to a T_TNODE, check if it matches our key. Note that
1027 * a T_TNODE might be skipping any number of bits - its 'pos' need
1028 * not be the parent's 'pos'+'bits'!
1030 * If it does match the current key, get pos/bits from it, extract
1031 * the index from our key, push the T_TNODE and walk the tree.
1033 * If it doesn't, we have to replace it with a new T_TNODE.
1035 * If we point to a T_LEAF, it might or might not have the same key
1036 * as we do. If it does, just change the value, update the T_LEAF's
1037 * value, and return it.
1038 * If it doesn't, we need to replace it with a T_TNODE.
1041 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1042 tn = (struct tnode *) n;
1046 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1048 pos = tn->pos + tn->bits;
1049 n = tnode_get_child(tn,
1050 tkey_extract_bits(key,
1054 BUG_ON(n && node_parent(n) != tn);
1060 * n ----> NULL, LEAF or TNODE
1062 * tp is n's (parent) ----> NULL or TNODE
1065 BUG_ON(tp && IS_LEAF(tp));
1067 /* Case 1: n is a leaf. Compare prefixes */
1069 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1070 l = (struct leaf *) n;
1071 li = leaf_info_new(plen);
1076 fa_head = &li->falh;
1077 insert_leaf_info(&l->list, li);
1086 li = leaf_info_new(plen);
1089 tnode_free((struct tnode *) l);
1093 fa_head = &li->falh;
1094 insert_leaf_info(&l->list, li);
1096 if (t->trie && n == NULL) {
1097 /* Case 2: n is NULL, and will just insert a new leaf */
1099 node_set_parent((struct node *)l, tp);
1101 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1102 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1104 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1106 * Add a new tnode here
1107 * first tnode need some special handling
1111 pos = tp->pos+tp->bits;
1116 newpos = tkey_mismatch(key, pos, n->key);
1117 tn = tnode_new(n->key, newpos, 1);
1120 tn = tnode_new(key, newpos, 1); /* First tnode */
1125 tnode_free((struct tnode *) l);
1129 node_set_parent((struct node *)tn, tp);
1131 missbit = tkey_extract_bits(key, newpos, 1);
1132 put_child(t, tn, missbit, (struct node *)l);
1133 put_child(t, tn, 1-missbit, n);
1136 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1137 put_child(t, (struct tnode *)tp, cindex,
1140 rcu_assign_pointer(t->trie, (struct node *)tn);
1145 if (tp && tp->pos + tp->bits > 32)
1146 pr_warning("fib_trie"
1147 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1148 tp, tp->pos, tp->bits, key, plen);
1150 /* Rebalance the trie */
1152 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1158 * Caller must hold RTNL.
1160 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1162 struct trie *t = (struct trie *) tb->tb_data;
1163 struct fib_alias *fa, *new_fa;
1164 struct list_head *fa_head = NULL;
1165 struct fib_info *fi;
1166 int plen = cfg->fc_dst_len;
1167 u8 tos = cfg->fc_tos;
1175 key = ntohl(cfg->fc_dst);
1177 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1179 mask = ntohl(inet_make_mask(plen));
1186 fi = fib_create_info(cfg);
1192 l = fib_find_node(t, key);
1196 fa_head = get_fa_head(l, plen);
1197 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1200 /* Now fa, if non-NULL, points to the first fib alias
1201 * with the same keys [prefix,tos,priority], if such key already
1202 * exists or to the node before which we will insert new one.
1204 * If fa is NULL, we will need to allocate a new one and
1205 * insert to the head of f.
1207 * If f is NULL, no fib node matched the destination key
1208 * and we need to allocate a new one of those as well.
1211 if (fa && fa->fa_tos == tos &&
1212 fa->fa_info->fib_priority == fi->fib_priority) {
1213 struct fib_alias *fa_first, *fa_match;
1216 if (cfg->fc_nlflags & NLM_F_EXCL)
1220 * 1. Find exact match for type, scope, fib_info to avoid
1222 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1226 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1227 list_for_each_entry_continue(fa, fa_head, fa_list) {
1228 if (fa->fa_tos != tos)
1230 if (fa->fa_info->fib_priority != fi->fib_priority)
1232 if (fa->fa_type == cfg->fc_type &&
1233 fa->fa_scope == cfg->fc_scope &&
1234 fa->fa_info == fi) {
1240 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1241 struct fib_info *fi_drop;
1251 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1255 fi_drop = fa->fa_info;
1256 new_fa->fa_tos = fa->fa_tos;
1257 new_fa->fa_info = fi;
1258 new_fa->fa_type = cfg->fc_type;
1259 new_fa->fa_scope = cfg->fc_scope;
1260 state = fa->fa_state;
1261 new_fa->fa_state = state & ~FA_S_ACCESSED;
1263 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1264 alias_free_mem_rcu(fa);
1266 fib_release_info(fi_drop);
1267 if (state & FA_S_ACCESSED)
1269 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1270 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1274 /* Error if we find a perfect match which
1275 * uses the same scope, type, and nexthop
1281 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1285 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1289 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1293 new_fa->fa_info = fi;
1294 new_fa->fa_tos = tos;
1295 new_fa->fa_type = cfg->fc_type;
1296 new_fa->fa_scope = cfg->fc_scope;
1297 new_fa->fa_state = 0;
1299 * Insert new entry to the list.
1303 fa_head = fib_insert_node(t, key, plen);
1304 if (unlikely(!fa_head)) {
1306 goto out_free_new_fa;
1310 list_add_tail_rcu(&new_fa->fa_list,
1311 (fa ? &fa->fa_list : fa_head));
1314 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1315 &cfg->fc_nlinfo, 0);
1320 kmem_cache_free(fn_alias_kmem, new_fa);
1322 fib_release_info(fi);
1327 /* should be called with rcu_read_lock */
1328 static int check_leaf(struct trie *t, struct leaf *l,
1329 t_key key, const struct flowi *flp,
1330 struct fib_result *res)
1332 struct leaf_info *li;
1333 struct hlist_head *hhead = &l->list;
1334 struct hlist_node *node;
1336 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1338 int plen = li->plen;
1339 __be32 mask = inet_make_mask(plen);
1341 if (l->key != (key & ntohl(mask)))
1344 err = fib_semantic_match(&li->falh, flp, res,
1345 htonl(l->key), mask, plen);
1347 #ifdef CONFIG_IP_FIB_TRIE_STATS
1349 t->stats.semantic_match_passed++;
1351 t->stats.semantic_match_miss++;
1360 static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
1361 struct fib_result *res)
1363 struct trie *t = (struct trie *) tb->tb_data;
1368 t_key key = ntohl(flp->fl4_dst);
1371 int current_prefix_length = KEYLENGTH;
1373 t_key node_prefix, key_prefix, pref_mismatch;
1378 n = rcu_dereference(t->trie);
1382 #ifdef CONFIG_IP_FIB_TRIE_STATS
1388 plen = check_leaf(t, (struct leaf *)n, key, flp, res);
1395 pn = (struct tnode *) n;
1403 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1406 n = tnode_get_child(pn, cindex);
1409 #ifdef CONFIG_IP_FIB_TRIE_STATS
1410 t->stats.null_node_hit++;
1416 plen = check_leaf(t, (struct leaf *)n, key, flp, res);
1424 cn = (struct tnode *)n;
1427 * It's a tnode, and we can do some extra checks here if we
1428 * like, to avoid descending into a dead-end branch.
1429 * This tnode is in the parent's child array at index
1430 * key[p_pos..p_pos+p_bits] but potentially with some bits
1431 * chopped off, so in reality the index may be just a
1432 * subprefix, padded with zero at the end.
1433 * We can also take a look at any skipped bits in this
1434 * tnode - everything up to p_pos is supposed to be ok,
1435 * and the non-chopped bits of the index (se previous
1436 * paragraph) are also guaranteed ok, but the rest is
1437 * considered unknown.
1439 * The skipped bits are key[pos+bits..cn->pos].
1442 /* If current_prefix_length < pos+bits, we are already doing
1443 * actual prefix matching, which means everything from
1444 * pos+(bits-chopped_off) onward must be zero along some
1445 * branch of this subtree - otherwise there is *no* valid
1446 * prefix present. Here we can only check the skipped
1447 * bits. Remember, since we have already indexed into the
1448 * parent's child array, we know that the bits we chopped of
1452 /* NOTA BENE: Checking only skipped bits
1453 for the new node here */
1455 if (current_prefix_length < pos+bits) {
1456 if (tkey_extract_bits(cn->key, current_prefix_length,
1457 cn->pos - current_prefix_length)
1463 * If chopped_off=0, the index is fully validated and we
1464 * only need to look at the skipped bits for this, the new,
1465 * tnode. What we actually want to do is to find out if
1466 * these skipped bits match our key perfectly, or if we will
1467 * have to count on finding a matching prefix further down,
1468 * because if we do, we would like to have some way of
1469 * verifying the existence of such a prefix at this point.
1472 /* The only thing we can do at this point is to verify that
1473 * any such matching prefix can indeed be a prefix to our
1474 * key, and if the bits in the node we are inspecting that
1475 * do not match our key are not ZERO, this cannot be true.
1476 * Thus, find out where there is a mismatch (before cn->pos)
1477 * and verify that all the mismatching bits are zero in the
1482 * Note: We aren't very concerned about the piece of
1483 * the key that precede pn->pos+pn->bits, since these
1484 * have already been checked. The bits after cn->pos
1485 * aren't checked since these are by definition
1486 * "unknown" at this point. Thus, what we want to see
1487 * is if we are about to enter the "prefix matching"
1488 * state, and in that case verify that the skipped
1489 * bits that will prevail throughout this subtree are
1490 * zero, as they have to be if we are to find a
1494 node_prefix = mask_pfx(cn->key, cn->pos);
1495 key_prefix = mask_pfx(key, cn->pos);
1496 pref_mismatch = key_prefix^node_prefix;
1500 * In short: If skipped bits in this node do not match
1501 * the search key, enter the "prefix matching"
1504 if (pref_mismatch) {
1505 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1507 pref_mismatch = pref_mismatch << 1;
1509 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1511 if (key_prefix != 0)
1514 if (current_prefix_length >= cn->pos)
1515 current_prefix_length = mp;
1518 pn = (struct tnode *)n; /* Descend */
1525 /* As zero don't change the child key (cindex) */
1526 while ((chopped_off <= pn->bits)
1527 && !(cindex & (1<<(chopped_off-1))))
1530 /* Decrease current_... with bits chopped off */
1531 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1532 current_prefix_length = pn->pos + pn->bits
1536 * Either we do the actual chop off according or if we have
1537 * chopped off all bits in this tnode walk up to our parent.
1540 if (chopped_off <= pn->bits) {
1541 cindex &= ~(1 << (chopped_off-1));
1543 struct tnode *parent = node_parent((struct node *) pn);
1547 /* Get Child's index */
1548 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1552 #ifdef CONFIG_IP_FIB_TRIE_STATS
1553 t->stats.backtrack++;
1566 * Remove the leaf and return parent.
1568 static void trie_leaf_remove(struct trie *t, struct leaf *l)
1570 struct tnode *tp = node_parent((struct node *) l);
1572 pr_debug("entering trie_leaf_remove(%p)\n", l);
1575 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
1576 put_child(t, (struct tnode *)tp, cindex, NULL);
1577 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1579 rcu_assign_pointer(t->trie, NULL);
1581 tnode_free((struct tnode *) l);
1585 * Caller must hold RTNL.
1587 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1589 struct trie *t = (struct trie *) tb->tb_data;
1591 int plen = cfg->fc_dst_len;
1592 u8 tos = cfg->fc_tos;
1593 struct fib_alias *fa, *fa_to_delete;
1594 struct list_head *fa_head;
1596 struct leaf_info *li;
1601 key = ntohl(cfg->fc_dst);
1602 mask = ntohl(inet_make_mask(plen));
1608 l = fib_find_node(t, key);
1613 fa_head = get_fa_head(l, plen);
1614 fa = fib_find_alias(fa_head, tos, 0);
1619 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1621 fa_to_delete = NULL;
1622 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1623 list_for_each_entry_continue(fa, fa_head, fa_list) {
1624 struct fib_info *fi = fa->fa_info;
1626 if (fa->fa_tos != tos)
1629 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1630 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1631 fa->fa_scope == cfg->fc_scope) &&
1632 (!cfg->fc_protocol ||
1633 fi->fib_protocol == cfg->fc_protocol) &&
1634 fib_nh_match(cfg, fi) == 0) {
1644 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1645 &cfg->fc_nlinfo, 0);
1647 l = fib_find_node(t, key);
1648 li = find_leaf_info(l, plen);
1650 list_del_rcu(&fa->fa_list);
1652 if (list_empty(fa_head)) {
1653 hlist_del_rcu(&li->hlist);
1657 if (hlist_empty(&l->list))
1658 trie_leaf_remove(t, l);
1660 if (fa->fa_state & FA_S_ACCESSED)
1663 fib_release_info(fa->fa_info);
1664 alias_free_mem_rcu(fa);
1668 static int trie_flush_list(struct trie *t, struct list_head *head)
1670 struct fib_alias *fa, *fa_node;
1673 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1674 struct fib_info *fi = fa->fa_info;
1676 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1677 list_del_rcu(&fa->fa_list);
1678 fib_release_info(fa->fa_info);
1679 alias_free_mem_rcu(fa);
1686 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1689 struct hlist_head *lih = &l->list;
1690 struct hlist_node *node, *tmp;
1691 struct leaf_info *li = NULL;
1693 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1694 found += trie_flush_list(t, &li->falh);
1696 if (list_empty(&li->falh)) {
1697 hlist_del_rcu(&li->hlist);
1705 * Scan for the next right leaf starting at node p->child[idx]
1706 * Since we have back pointer, no recursion necessary.
1708 static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
1714 idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
1718 while (idx < 1u << p->bits) {
1719 c = tnode_get_child_rcu(p, idx++);
1724 prefetch(p->child[idx]);
1725 return (struct leaf *) c;
1728 /* Rescan start scanning in new node */
1729 p = (struct tnode *) c;
1733 /* Node empty, walk back up to parent */
1734 c = (struct node *) p;
1735 } while ( (p = node_parent_rcu(c)) != NULL);
1737 return NULL; /* Root of trie */
1740 static struct leaf *trie_firstleaf(struct trie *t)
1742 struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
1747 if (IS_LEAF(n)) /* trie is just a leaf */
1748 return (struct leaf *) n;
1750 return leaf_walk_rcu(n, NULL);
1753 static struct leaf *trie_nextleaf(struct leaf *l)
1755 struct node *c = (struct node *) l;
1756 struct tnode *p = node_parent(c);
1759 return NULL; /* trie with just one leaf */
1761 return leaf_walk_rcu(p, c);
1764 static struct leaf *trie_leafindex(struct trie *t, int index)
1766 struct leaf *l = trie_firstleaf(t);
1768 while (l && index-- > 0)
1769 l = trie_nextleaf(l);
1776 * Caller must hold RTNL.
1778 static int fn_trie_flush(struct fib_table *tb)
1780 struct trie *t = (struct trie *) tb->tb_data;
1781 struct leaf *l, *ll = NULL;
1784 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
1785 found += trie_flush_leaf(t, l);
1787 if (ll && hlist_empty(&ll->list))
1788 trie_leaf_remove(t, ll);
1792 if (ll && hlist_empty(&ll->list))
1793 trie_leaf_remove(t, ll);
1795 pr_debug("trie_flush found=%d\n", found);
1799 static void fn_trie_select_default(struct fib_table *tb,
1800 const struct flowi *flp,
1801 struct fib_result *res)
1803 struct trie *t = (struct trie *) tb->tb_data;
1804 int order, last_idx;
1805 struct fib_info *fi = NULL;
1806 struct fib_info *last_resort;
1807 struct fib_alias *fa = NULL;
1808 struct list_head *fa_head;
1817 l = fib_find_node(t, 0);
1821 fa_head = get_fa_head(l, 0);
1825 if (list_empty(fa_head))
1828 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1829 struct fib_info *next_fi = fa->fa_info;
1831 if (fa->fa_scope != res->scope ||
1832 fa->fa_type != RTN_UNICAST)
1835 if (next_fi->fib_priority > res->fi->fib_priority)
1837 if (!next_fi->fib_nh[0].nh_gw ||
1838 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1840 fa->fa_state |= FA_S_ACCESSED;
1843 if (next_fi != res->fi)
1845 } else if (!fib_detect_death(fi, order, &last_resort,
1846 &last_idx, tb->tb_default)) {
1847 fib_result_assign(res, fi);
1848 tb->tb_default = order;
1854 if (order <= 0 || fi == NULL) {
1855 tb->tb_default = -1;
1859 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1861 fib_result_assign(res, fi);
1862 tb->tb_default = order;
1866 fib_result_assign(res, last_resort);
1867 tb->tb_default = last_idx;
1872 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1873 struct fib_table *tb,
1874 struct sk_buff *skb, struct netlink_callback *cb)
1877 struct fib_alias *fa;
1878 __be32 xkey = htonl(key);
1883 /* rcu_read_lock is hold by caller */
1885 list_for_each_entry_rcu(fa, fah, fa_list) {
1891 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1900 fa->fa_info, NLM_F_MULTI) < 0) {
1910 static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1911 struct sk_buff *skb, struct netlink_callback *cb)
1913 struct leaf_info *li;
1914 struct hlist_node *node;
1920 /* rcu_read_lock is hold by caller */
1921 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
1930 if (list_empty(&li->falh))
1933 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
1944 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb,
1945 struct netlink_callback *cb)
1948 struct trie *t = (struct trie *) tb->tb_data;
1949 t_key key = cb->args[2];
1950 int count = cb->args[3];
1953 /* Dump starting at last key.
1954 * Note: 0.0.0.0/0 (ie default) is first key.
1957 l = trie_firstleaf(t);
1959 /* Normally, continue from last key, but if that is missing
1960 * fallback to using slow rescan
1962 l = fib_find_node(t, key);
1964 l = trie_leafindex(t, count);
1968 cb->args[2] = l->key;
1969 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
1970 cb->args[3] = count;
1976 l = trie_nextleaf(l);
1977 memset(&cb->args[4], 0,
1978 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1980 cb->args[3] = count;
1986 void __init fib_hash_init(void)
1988 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1989 sizeof(struct fib_alias),
1990 0, SLAB_PANIC, NULL);
1992 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1993 max(sizeof(struct leaf),
1994 sizeof(struct leaf_info)),
1995 0, SLAB_PANIC, NULL);
1999 /* Fix more generic FIB names for init later */
2000 struct fib_table *fib_hash_table(u32 id)
2002 struct fib_table *tb;
2005 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
2011 tb->tb_default = -1;
2012 tb->tb_lookup = fn_trie_lookup;
2013 tb->tb_insert = fn_trie_insert;
2014 tb->tb_delete = fn_trie_delete;
2015 tb->tb_flush = fn_trie_flush;
2016 tb->tb_select_default = fn_trie_select_default;
2017 tb->tb_dump = fn_trie_dump;
2019 t = (struct trie *) tb->tb_data;
2020 memset(t, 0, sizeof(*t));
2022 if (id == RT_TABLE_LOCAL)
2023 pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
2028 #ifdef CONFIG_PROC_FS
2029 /* Depth first Trie walk iterator */
2030 struct fib_trie_iter {
2031 struct seq_net_private p;
2032 struct trie *trie_local, *trie_main;
2033 struct tnode *tnode;
2039 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2041 struct tnode *tn = iter->tnode;
2042 unsigned cindex = iter->index;
2045 /* A single entry routing table */
2049 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2050 iter->tnode, iter->index, iter->depth);
2052 while (cindex < (1<<tn->bits)) {
2053 struct node *n = tnode_get_child_rcu(tn, cindex);
2058 iter->index = cindex + 1;
2060 /* push down one level */
2061 iter->tnode = (struct tnode *) n;
2071 /* Current node exhausted, pop back up */
2072 p = node_parent_rcu((struct node *)tn);
2074 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2084 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2092 n = rcu_dereference(t->trie);
2099 iter->tnode = (struct tnode *) n;
2114 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2117 struct fib_trie_iter iter;
2119 memset(s, 0, sizeof(*s));
2122 for (n = fib_trie_get_first(&iter, t); n;
2123 n = fib_trie_get_next(&iter)) {
2125 struct leaf *l = (struct leaf *)n;
2126 struct leaf_info *li;
2127 struct hlist_node *tmp;
2130 s->totdepth += iter.depth;
2131 if (iter.depth > s->maxdepth)
2132 s->maxdepth = iter.depth;
2134 hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
2137 const struct tnode *tn = (const struct tnode *) n;
2141 if (tn->bits < MAX_STAT_DEPTH)
2142 s->nodesizes[tn->bits]++;
2144 for (i = 0; i < (1<<tn->bits); i++)
2153 * This outputs /proc/net/fib_triestats
2155 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2157 unsigned i, max, pointers, bytes, avdepth;
2160 avdepth = stat->totdepth*100 / stat->leaves;
2164 seq_printf(seq, "\tAver depth: %u.%02d\n",
2165 avdepth / 100, avdepth % 100);
2166 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2168 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2169 bytes = sizeof(struct leaf) * stat->leaves;
2171 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
2172 bytes += sizeof(struct leaf_info) * stat->prefixes;
2174 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2175 bytes += sizeof(struct tnode) * stat->tnodes;
2177 max = MAX_STAT_DEPTH;
2178 while (max > 0 && stat->nodesizes[max-1] == 0)
2182 for (i = 1; i <= max; i++)
2183 if (stat->nodesizes[i] != 0) {
2184 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2185 pointers += (1<<i) * stat->nodesizes[i];
2187 seq_putc(seq, '\n');
2188 seq_printf(seq, "\tPointers: %u\n", pointers);
2190 bytes += sizeof(struct node *) * pointers;
2191 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2192 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
2195 #ifdef CONFIG_IP_FIB_TRIE_STATS
2196 static void trie_show_usage(struct seq_file *seq,
2197 const struct trie_use_stats *stats)
2199 seq_printf(seq, "\nCounters:\n---------\n");
2200 seq_printf(seq, "gets = %u\n", stats->gets);
2201 seq_printf(seq, "backtracks = %u\n", stats->backtrack);
2202 seq_printf(seq, "semantic match passed = %u\n",
2203 stats->semantic_match_passed);
2204 seq_printf(seq, "semantic match miss = %u\n",
2205 stats->semantic_match_miss);
2206 seq_printf(seq, "null node hit= %u\n", stats->null_node_hit);
2207 seq_printf(seq, "skipped node resize = %u\n\n",
2208 stats->resize_node_skipped);
2210 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2212 static void fib_trie_show(struct seq_file *seq, const char *name,
2215 struct trie_stat stat;
2217 trie_collect_stats(trie, &stat);
2218 seq_printf(seq, "%s:\n", name);
2219 trie_show_stats(seq, &stat);
2220 #ifdef CONFIG_IP_FIB_TRIE_STATS
2221 trie_show_usage(seq, &trie->stats);
2225 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2227 struct net *net = (struct net *)seq->private;
2228 struct fib_table *tb;
2231 "Basic info: size of leaf:"
2232 " %Zd bytes, size of tnode: %Zd bytes.\n",
2233 sizeof(struct leaf), sizeof(struct tnode));
2235 tb = fib_get_table(net, RT_TABLE_LOCAL);
2237 fib_trie_show(seq, "Local", (struct trie *) tb->tb_data);
2239 tb = fib_get_table(net, RT_TABLE_MAIN);
2241 fib_trie_show(seq, "Main", (struct trie *) tb->tb_data);
2246 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2251 net = get_proc_net(inode);
2254 err = single_open(file, fib_triestat_seq_show, net);
2262 static int fib_triestat_seq_release(struct inode *ino, struct file *f)
2264 struct seq_file *seq = f->private_data;
2265 put_net(seq->private);
2266 return single_release(ino, f);
2269 static const struct file_operations fib_triestat_fops = {
2270 .owner = THIS_MODULE,
2271 .open = fib_triestat_seq_open,
2273 .llseek = seq_lseek,
2274 .release = fib_triestat_seq_release,
2277 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2283 for (n = fib_trie_get_first(iter, iter->trie_local);
2284 n; ++idx, n = fib_trie_get_next(iter)) {
2289 for (n = fib_trie_get_first(iter, iter->trie_main);
2290 n; ++idx, n = fib_trie_get_next(iter)) {
2297 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2300 struct fib_trie_iter *iter = seq->private;
2301 struct fib_table *tb;
2303 if (!iter->trie_local) {
2304 tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
2306 iter->trie_local = (struct trie *) tb->tb_data;
2308 if (!iter->trie_main) {
2309 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2311 iter->trie_main = (struct trie *) tb->tb_data;
2315 return SEQ_START_TOKEN;
2316 return fib_trie_get_idx(iter, *pos - 1);
2319 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2321 struct fib_trie_iter *iter = seq->private;
2325 if (v == SEQ_START_TOKEN)
2326 return fib_trie_get_idx(iter, 0);
2328 v = fib_trie_get_next(iter);
2333 /* continue scan in next trie */
2334 if (iter->trie == iter->trie_local)
2335 return fib_trie_get_first(iter, iter->trie_main);
2340 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2346 static void seq_indent(struct seq_file *seq, int n)
2348 while (n-- > 0) seq_puts(seq, " ");
2351 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2354 case RT_SCOPE_UNIVERSE: return "universe";
2355 case RT_SCOPE_SITE: return "site";
2356 case RT_SCOPE_LINK: return "link";
2357 case RT_SCOPE_HOST: return "host";
2358 case RT_SCOPE_NOWHERE: return "nowhere";
2360 snprintf(buf, len, "scope=%d", s);
2365 static const char *rtn_type_names[__RTN_MAX] = {
2366 [RTN_UNSPEC] = "UNSPEC",
2367 [RTN_UNICAST] = "UNICAST",
2368 [RTN_LOCAL] = "LOCAL",
2369 [RTN_BROADCAST] = "BROADCAST",
2370 [RTN_ANYCAST] = "ANYCAST",
2371 [RTN_MULTICAST] = "MULTICAST",
2372 [RTN_BLACKHOLE] = "BLACKHOLE",
2373 [RTN_UNREACHABLE] = "UNREACHABLE",
2374 [RTN_PROHIBIT] = "PROHIBIT",
2375 [RTN_THROW] = "THROW",
2377 [RTN_XRESOLVE] = "XRESOLVE",
2380 static inline const char *rtn_type(char *buf, size_t len, unsigned t)
2382 if (t < __RTN_MAX && rtn_type_names[t])
2383 return rtn_type_names[t];
2384 snprintf(buf, len, "type %u", t);
2388 /* Pretty print the trie */
2389 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2391 const struct fib_trie_iter *iter = seq->private;
2394 if (v == SEQ_START_TOKEN)
2397 if (!node_parent_rcu(n)) {
2398 if (iter->trie == iter->trie_local)
2399 seq_puts(seq, "<local>:\n");
2401 seq_puts(seq, "<main>:\n");
2405 struct tnode *tn = (struct tnode *) n;
2406 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2408 seq_indent(seq, iter->depth-1);
2409 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2410 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2411 tn->empty_children);
2414 struct leaf *l = (struct leaf *) n;
2415 struct leaf_info *li;
2416 struct hlist_node *node;
2417 __be32 val = htonl(l->key);
2419 seq_indent(seq, iter->depth);
2420 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2422 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
2423 struct fib_alias *fa;
2425 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2426 char buf1[32], buf2[32];
2428 seq_indent(seq, iter->depth+1);
2429 seq_printf(seq, " /%d %s %s", li->plen,
2430 rtn_scope(buf1, sizeof(buf1),
2432 rtn_type(buf2, sizeof(buf2),
2435 seq_printf(seq, " tos=%d", fa->fa_tos);
2436 seq_putc(seq, '\n');
2444 static const struct seq_operations fib_trie_seq_ops = {
2445 .start = fib_trie_seq_start,
2446 .next = fib_trie_seq_next,
2447 .stop = fib_trie_seq_stop,
2448 .show = fib_trie_seq_show,
2451 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2453 return seq_open_net(inode, file, &fib_trie_seq_ops,
2454 sizeof(struct fib_trie_iter));
2457 static const struct file_operations fib_trie_fops = {
2458 .owner = THIS_MODULE,
2459 .open = fib_trie_seq_open,
2461 .llseek = seq_lseek,
2462 .release = seq_release_net,
2465 struct fib_route_iter {
2466 struct seq_net_private p;
2467 struct trie *main_trie;
2472 static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
2474 struct leaf *l = NULL;
2475 struct trie *t = iter->main_trie;
2477 /* use cache location of last found key */
2478 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2482 l = trie_firstleaf(t);
2485 while (l && pos-- > 0) {
2487 l = trie_nextleaf(l);
2491 iter->key = pos; /* remember it */
2493 iter->pos = 0; /* forget it */
2498 static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2501 struct fib_route_iter *iter = seq->private;
2502 struct fib_table *tb;
2505 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2509 iter->main_trie = (struct trie *) tb->tb_data;
2511 return SEQ_START_TOKEN;
2513 return fib_route_get_idx(iter, *pos - 1);
2516 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2518 struct fib_route_iter *iter = seq->private;
2522 if (v == SEQ_START_TOKEN) {
2524 l = trie_firstleaf(iter->main_trie);
2527 l = trie_nextleaf(l);
2537 static void fib_route_seq_stop(struct seq_file *seq, void *v)
2543 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2545 static unsigned type2flags[RTN_MAX + 1] = {
2546 [7] = RTF_REJECT, [8] = RTF_REJECT,
2548 unsigned flags = type2flags[type];
2550 if (fi && fi->fib_nh->nh_gw)
2551 flags |= RTF_GATEWAY;
2552 if (mask == htonl(0xFFFFFFFF))
2559 * This outputs /proc/net/route.
2560 * The format of the file is not supposed to be changed
2561 * and needs to be same as fib_hash output to avoid breaking
2564 static int fib_route_seq_show(struct seq_file *seq, void *v)
2567 struct leaf_info *li;
2568 struct hlist_node *node;
2570 if (v == SEQ_START_TOKEN) {
2571 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2572 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2577 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
2578 struct fib_alias *fa;
2579 __be32 mask, prefix;
2581 mask = inet_make_mask(li->plen);
2582 prefix = htonl(l->key);
2584 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2585 const struct fib_info *fi = fa->fa_info;
2586 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2589 if (fa->fa_type == RTN_BROADCAST
2590 || fa->fa_type == RTN_MULTICAST)
2594 snprintf(bf, sizeof(bf),
2595 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2596 fi->fib_dev ? fi->fib_dev->name : "*",
2598 fi->fib_nh->nh_gw, flags, 0, 0,
2602 fi->fib_advmss + 40 : 0),
2606 snprintf(bf, sizeof(bf),
2607 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2608 prefix, 0, flags, 0, 0, 0,
2611 seq_printf(seq, "%-127s\n", bf);
2618 static const struct seq_operations fib_route_seq_ops = {
2619 .start = fib_route_seq_start,
2620 .next = fib_route_seq_next,
2621 .stop = fib_route_seq_stop,
2622 .show = fib_route_seq_show,
2625 static int fib_route_seq_open(struct inode *inode, struct file *file)
2627 return seq_open_net(inode, file, &fib_route_seq_ops,
2628 sizeof(struct fib_route_iter));
2631 static const struct file_operations fib_route_fops = {
2632 .owner = THIS_MODULE,
2633 .open = fib_route_seq_open,
2635 .llseek = seq_lseek,
2636 .release = seq_release_net,
2639 int __net_init fib_proc_init(struct net *net)
2641 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2644 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2645 &fib_triestat_fops))
2648 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
2654 proc_net_remove(net, "fib_triestat");
2656 proc_net_remove(net, "fib_trie");
2661 void __net_exit fib_proc_exit(struct net *net)
2663 proc_net_remove(net, "fib_trie");
2664 proc_net_remove(net, "fib_triestat");
2665 proc_net_remove(net, "route");
2668 #endif /* CONFIG_PROC_FS */