2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie, which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused, which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
45 * Substantial contributions to this work come from:
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
53 #define VERSION "0.407"
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <asm/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/sched.h>
62 #include <linux/string.h>
63 #include <linux/socket.h>
64 #include <linux/sockios.h>
65 #include <linux/errno.h>
67 #include <linux/inet.h>
68 #include <linux/inetdevice.h>
69 #include <linux/netdevice.h>
70 #include <linux/if_arp.h>
71 #include <linux/proc_fs.h>
72 #include <linux/rcupdate.h>
73 #include <linux/skbuff.h>
74 #include <linux/netlink.h>
75 #include <linux/init.h>
76 #include <linux/list.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
85 #undef CONFIG_IP_FIB_TRIE_STATS
86 #define MAX_STAT_DEPTH 32
88 #define KEYLENGTH (8*sizeof(t_key))
89 #define MASK_PFX(k, l) (((l) == 0) ? 0 : (((k) >> (KEYLENGTH - (l))) << (KEYLENGTH - (l))))
90 #define TKEY_GET_MASK(offset, bits) (((bits) == 0) ? 0 : ((t_key)(-1) << (KEYLENGTH - (bits)) >> (offset)))
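/*
 * Illustrative examples of the two masking macros above (assuming
 * KEYLENGTH == 32, with bit 0 being the most significant bit of the key):
 *   MASK_PFX(0xC0A80A01, 16)  == 0xC0A80000   keep only the top 16 bits
 *   TKEY_GET_MASK(8, 4)       == 0x00F00000   a run of 4 one-bits starting
 *                                              8 bits below the MSB
 */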
92 typedef unsigned int t_key;
96 #define NODE_TYPE_MASK 0x1UL
97 #define NODE_PARENT(node) \
98 ((struct tnode *)rcu_dereference(((node)->parent & ~NODE_TYPE_MASK)))
100 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
102 #define NODE_SET_PARENT(node, ptr) \
103 rcu_assign_pointer((node)->parent, \
104 ((unsigned long)(ptr)) | NODE_TYPE(node))
106 #define IS_TNODE(n) (!((n)->parent & T_LEAF))
107 #define IS_LEAF(n) ((n)->parent & T_LEAF)
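/*
 * The low bit of ->parent doubles as the node type tag (NODE_TYPE_MASK):
 * it is set for leaves (T_LEAF) and clear for tnodes (T_TNODE), which is
 * why NODE_PARENT() masks it off before dereferencing and
 * NODE_SET_PARENT() ORs the old type back in.
 */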
111 unsigned long parent;
116 unsigned long parent;
117 struct hlist_head list;
122 struct hlist_node hlist;
125 struct list_head falh;
130 unsigned long parent;
131 unsigned short pos:5; /* log2(KEYLENGTH) bits needed */
132 unsigned short bits:5; /* log2(KEYLENGTH) bits needed */
133 unsigned short full_children; /* KEYLENGTH bits needed */
134 unsigned short empty_children; /* KEYLENGTH bits needed */
136 struct node *child[0];
139 #ifdef CONFIG_IP_FIB_TRIE_STATS
140 struct trie_use_stats {
142 unsigned int backtrack;
143 unsigned int semantic_match_passed;
144 unsigned int semantic_match_miss;
145 unsigned int null_node_hit;
146 unsigned int resize_node_skipped;
151 unsigned int totdepth;
152 unsigned int maxdepth;
155 unsigned int nullpointers;
156 unsigned int nodesizes[MAX_STAT_DEPTH];
161 #ifdef CONFIG_IP_FIB_TRIE_STATS
162 struct trie_use_stats stats;
165 unsigned int revision;
168 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
169 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
170 static struct node *resize(struct trie *t, struct tnode *tn);
171 static struct tnode *inflate(struct trie *t, struct tnode *tn);
172 static struct tnode *halve(struct trie *t, struct tnode *tn);
173 static void tnode_free(struct tnode *tn);
175 static kmem_cache_t *fn_alias_kmem __read_mostly;
176 static struct trie *trie_local = NULL, *trie_main = NULL;
179 /* rcu_read_lock must be held by the caller on the read side */
181 static inline struct node *tnode_get_child(struct tnode *tn, int i)
183 BUG_ON(i >= 1 << tn->bits);
185 return rcu_dereference(tn->child[i]);
188 static inline int tnode_child_length(const struct tnode *tn)
190 return 1 << tn->bits;
193 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
195 if (offset < KEYLENGTH)
196 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
201 static inline int tkey_equals(t_key a, t_key b)
206 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
208 if (bits == 0 || offset >= KEYLENGTH)
210 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
211 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
214 static inline int tkey_mismatch(t_key a, int offset, t_key b)
221 while ((diff << i) >> (KEYLENGTH-1) == 0)
227 To understand this stuff, an understanding of keys and all their bits is
228 necessary. Every node in the trie has a key associated with it, but not
229 all of the bits in that key are significant.
231 Consider a node 'n' and its parent 'tp'.
233 If n is a leaf, every bit in its key is significant. Its presence is
234 necessitated by path compression, since during a tree traversal (when
235 searching for a leaf - unless we are doing an insertion) we will completely
236 ignore all skipped bits we encounter. Thus we need to verify, at the end of
237 a potentially successful search, that we have indeed been walking the
240 Note that we can never "miss" the correct key in the tree if present by
241 following the wrong path. Path compression ensures that segments of the key
242 that are the same for all keys with a given prefix are skipped, but the
243 skipped part *is* identical for each node in the subtrie below the skipped
244 bit! fib_insert_node() in this implementation takes care of that - note the
245 call to tkey_sub_equals() in fib_insert_node().
247 If n is an internal node - a 'tnode' here - the various parts of its key
248 have many different meanings.
251 _________________________________________________________________
252 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
253 -----------------------------------------------------------------
254 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
256 _________________________________________________________________
257 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
258 -----------------------------------------------------------------
259 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
266 First, let's just ignore the bits that come before the parent tp, that is
267 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
268 not use them for anything.
270 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
271 index into the parent's child array. That is, they will be used to find
272 'n' among tp's children.
274 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
277 All the bits we have seen so far are significant to the node n. The rest
278 of the bits are really not needed or indeed known in n->key.
280 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
281 n's child array, and will of course be different for each child.
284 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
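As a concrete illustration of the layout above (values chosen to match the
diagram): tp->pos = 7, tp->bits = 3, n->pos = 15, n->bits = 4. Bits 0..6 of
n->key are the ignored "i" bits, bits 7..9 ("N") are n's index in tp's child
array, i.e. tkey_extract_bits(n->key, 7, 3), bits 10..14 ("S") are the
skipped bits, bits 15..18 ("C") index into n's own child array, and bits
19..31 ("u") are not significant for n at all.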
289 static inline void check_tnode(const struct tnode *tn)
291 WARN_ON(tn && tn->pos+tn->bits > 32);
294 static int halve_threshold = 25;
295 static int inflate_threshold = 50;
296 static int halve_threshold_root = 15;
297 static int inflate_threshold_root = 25;
300 static void __alias_free_mem(struct rcu_head *head)
302 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
303 kmem_cache_free(fn_alias_kmem, fa);
306 static inline void alias_free_mem_rcu(struct fib_alias *fa)
308 call_rcu(&fa->rcu, __alias_free_mem);
311 static void __leaf_free_rcu(struct rcu_head *head)
313 kfree(container_of(head, struct leaf, rcu));
316 static void __leaf_info_free_rcu(struct rcu_head *head)
318 kfree(container_of(head, struct leaf_info, rcu));
321 static inline void free_leaf_info(struct leaf_info *leaf)
323 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
326 static struct tnode *tnode_alloc(unsigned int size)
330 if (size <= PAGE_SIZE)
331 return kcalloc(size, 1, GFP_KERNEL);
333 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
337 return page_address(pages);
340 static void __tnode_free_rcu(struct rcu_head *head)
342 struct tnode *tn = container_of(head, struct tnode, rcu);
343 unsigned int size = sizeof(struct tnode) +
344 (1 << tn->bits) * sizeof(struct node *);
346 if (size <= PAGE_SIZE)
349 free_pages((unsigned long)tn, get_order(size));
352 static inline void tnode_free(struct tnode *tn)
355 struct leaf *l = (struct leaf *) tn;
356 call_rcu_bh(&l->rcu, __leaf_free_rcu);
359 call_rcu(&tn->rcu, __tnode_free_rcu);
362 static struct leaf *leaf_new(void)
364 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
367 INIT_HLIST_HEAD(&l->list);
372 static struct leaf_info *leaf_info_new(int plen)
374 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
377 INIT_LIST_HEAD(&li->falh);
382 static struct tnode* tnode_new(t_key key, int pos, int bits)
384 int nchildren = 1<<bits;
385 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
386 struct tnode *tn = tnode_alloc(sz);
390 tn->parent = T_TNODE;
394 tn->full_children = 0;
395 tn->empty_children = 1<<bits;
398 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
399 (unsigned int) (sizeof(struct node) * 1<<bits));
404 * Check whether a tnode 'n' is "full", i.e. it is an internal node
405 * and no bits are skipped. See discussion in dyntree paper p. 6
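 * For illustration: with tn->pos = 7 and tn->bits = 3, a child tnode whose
 * pos is exactly 10 (== tn->pos + tn->bits) is "full"; a child tnode whose
 * pos is 12 has two skipped bits and is not. Leaves and NULL slots are
 * never considered full.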
408 static inline int tnode_full(const struct tnode *tn, const struct node *n)
410 if (n == NULL || IS_LEAF(n))
413 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
416 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
418 tnode_put_child_reorg(tn, i, n, -1);
422 * Add a child at position i overwriting the old value.
423 * Update the value of full_children and empty_children.
426 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
428 struct node *chi = tn->child[i];
431 BUG_ON(i >= 1<<tn->bits);
434 /* update emptyChildren */
435 if (n == NULL && chi != NULL)
436 tn->empty_children++;
437 else if (n != NULL && chi == NULL)
438 tn->empty_children--;
440 /* update fullChildren */
442 wasfull = tnode_full(tn, chi);
444 isfull = tnode_full(tn, n);
445 if (wasfull && !isfull)
447 else if (!wasfull && isfull)
451 NODE_SET_PARENT(n, tn);
453 rcu_assign_pointer(tn->child[i], n);
456 static struct node *resize(struct trie *t, struct tnode *tn)
460 struct tnode *old_tn;
461 int inflate_threshold_use;
462 int halve_threshold_use;
467 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
468 tn, inflate_threshold, halve_threshold);
471 if (tn->empty_children == tnode_child_length(tn)) {
476 if (tn->empty_children == tnode_child_length(tn) - 1)
477 for (i = 0; i < tnode_child_length(tn); i++) {
484 /* compress one level */
485 NODE_SET_PARENT(n, NULL);
490 * Double as long as the resulting node has a number of
491 * non-empty children that is above the threshold.
495 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
496 * the Helsinki University of Technology and Matti Tikkanen of Nokia
497 * Telecommunications, page 6:
498 * "A node is doubled if the ratio of non-empty children to all
499 * children in the *doubled* node is at least 'high'."
501 * 'high' in this instance is the variable 'inflate_threshold'. It
502 * is expressed as a percentage, so we multiply it with
503 * tnode_child_length() and instead of multiplying by 2 (since the
504 * child array will be doubled by inflate()) and multiplying
505 * the left-hand side by 100 (to handle the percentage thing) we
506 * multiply the left-hand side by 50.
508 * The left-hand side may look a bit weird: tnode_child_length(tn)
509 * - tn->empty_children is of course the number of non-null children
510 * in the current node. tn->full_children is the number of "full"
511 * children, that is non-null tnodes with a skip value of 0.
512 * All of those will be doubled in the resulting inflated tnode, so
513 * we just count them one extra time here.
515 * A clearer way to write this would be:
517 * to_be_doubled = tn->full_children;
518 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
521 * new_child_length = tnode_child_length(tn) * 2;
523 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
525 * if (new_fill_factor >= inflate_threshold)
527 * ...and so on, though it would mess up the while () loop.
530 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
534 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
535 * inflate_threshold * new_child_length
537 * expand not_to_be_doubled and to_be_doubled, and shorten:
538 * 100 * (tnode_child_length(tn) - tn->empty_children +
539 * tn->full_children) >= inflate_threshold * new_child_length
541 * expand new_child_length:
542 * 100 * (tnode_child_length(tn) - tn->empty_children +
543 * tn->full_children) >=
544 * inflate_threshold * tnode_child_length(tn) * 2
547 * 50 * (tn->full_children + tnode_child_length(tn) -
548 * tn->empty_children) >= inflate_threshold *
549 * tnode_child_length(tn)
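 * A quick sanity check with made-up numbers: suppose
 * tnode_child_length(tn) = 16, tn->empty_children = 4,
 * tn->full_children = 6 and inflate_threshold = 50. Then
 * 50 * (6 + 16 - 4) = 900 >= 50 * 16 = 800, so the node is doubled.
 * Equivalently, the doubled node would have 32 slots holding
 * 6 + 2*6 = 18 non-empty children, a fill factor of about 56% >= 50%.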
555 /* Keep root node larger */
558 inflate_threshold_use = inflate_threshold_root;
560 inflate_threshold_use = inflate_threshold;
563 while ((tn->full_children > 0 &&
564 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
565 inflate_threshold_use * tnode_child_length(tn))) {
571 #ifdef CONFIG_IP_FIB_TRIE_STATS
572 t->stats.resize_node_skipped++;
581 * Halve as long as the number of empty children in this
582 * node is above threshold.
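 * For example (made-up numbers again): with tnode_child_length(tn) = 16,
 * tn->empty_children = 13 and halve_threshold = 25, we have
 * 100 * (16 - 13) = 300 < 25 * 16 = 400, so the node is halved.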
586 /* Keep root node larger */
589 halve_threshold_use = halve_threshold_root;
591 halve_threshold_use = halve_threshold;
594 while (tn->bits > 1 &&
595 100 * (tnode_child_length(tn) - tn->empty_children) <
596 halve_threshold_use * tnode_child_length(tn)) {
602 #ifdef CONFIG_IP_FIB_TRIE_STATS
603 t->stats.resize_node_skipped++;
610 /* Only one child remains */
611 if (tn->empty_children == tnode_child_length(tn) - 1)
612 for (i = 0; i < tnode_child_length(tn); i++) {
619 /* compress one level */
621 NODE_SET_PARENT(n, NULL);
626 return (struct node *) tn;
629 static struct tnode *inflate(struct trie *t, struct tnode *tn)
632 struct tnode *oldtnode = tn;
633 int olen = tnode_child_length(tn);
636 pr_debug("In inflate\n");
638 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
641 return ERR_PTR(-ENOMEM);
644 * Preallocate and store tnodes before the actual work so we
645 * don't get into an inconsistent state if memory allocation
646 * fails. In case of failure we return the old node and the
647 * inflate operation is abandoned.
650 for (i = 0; i < olen; i++) {
651 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
655 inode->pos == oldtnode->pos + oldtnode->bits &&
657 struct tnode *left, *right;
658 t_key m = TKEY_GET_MASK(inode->pos, 1);
660 left = tnode_new(inode->key&(~m), inode->pos + 1,
665 right = tnode_new(inode->key|m, inode->pos + 1,
673 put_child(t, tn, 2*i, (struct node *) left);
674 put_child(t, tn, 2*i+1, (struct node *) right);
678 for (i = 0; i < olen; i++) {
679 struct node *node = tnode_get_child(oldtnode, i);
680 struct tnode *left, *right;
687 /* A leaf or an internal node with skipped bits */
689 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
690 tn->pos + tn->bits - 1) {
691 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
693 put_child(t, tn, 2*i, node);
695 put_child(t, tn, 2*i+1, node);
699 /* An internal node with two children */
700 inode = (struct tnode *) node;
702 if (inode->bits == 1) {
703 put_child(t, tn, 2*i, inode->child[0]);
704 put_child(t, tn, 2*i+1, inode->child[1]);
710 /* An internal node with more than two children */
712 /* We will replace this node 'inode' with two new
713 * ones, 'left' and 'right', each with half of the
714 * original children. The two new nodes will have
715 * a position one bit further down the key and this
716 * means that the "significant" part of their keys
717 * (see the discussion near the top of this file)
718 * will differ by one bit, which will be "0" in
719 * left's key and "1" in right's key. Since we are
720 * moving the key position by one step, the bit that
721 * we are moving away from - the bit at position
722 * (inode->pos) - is the one that will differ between
723 * left and right. So... we synthesize that bit in the
725 * The mask 'm' below will be a single "one" bit at
726 * the position (inode->pos)
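 * (Illustrative value only: with 32-bit keys and inode->pos = 10, m is
 * 0x80000000 >> 10 == 0x00200000.)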
729 /* Use the old key, but set the new significant
733 left = (struct tnode *) tnode_get_child(tn, 2*i);
734 put_child(t, tn, 2*i, NULL);
738 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
739 put_child(t, tn, 2*i+1, NULL);
743 size = tnode_child_length(left);
744 for (j = 0; j < size; j++) {
745 put_child(t, left, j, inode->child[j]);
746 put_child(t, right, j, inode->child[j + size]);
748 put_child(t, tn, 2*i, resize(t, left));
749 put_child(t, tn, 2*i+1, resize(t, right));
753 tnode_free(oldtnode);
757 int size = tnode_child_length(tn);
760 for (j = 0; j < size; j++)
762 tnode_free((struct tnode *)tn->child[j]);
766 return ERR_PTR(-ENOMEM);
770 static struct tnode *halve(struct trie *t, struct tnode *tn)
772 struct tnode *oldtnode = tn;
773 struct node *left, *right;
775 int olen = tnode_child_length(tn);
777 pr_debug("In halve\n");
779 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
782 return ERR_PTR(-ENOMEM);
785 * Preallocate and store tnodes before the actual work so we
786 * don't get into an inconsistent state if memory allocation
787 * fails. In case of failure we return the old node and the
788 * halve operation is abandoned.
791 for (i = 0; i < olen; i += 2) {
792 left = tnode_get_child(oldtnode, i);
793 right = tnode_get_child(oldtnode, i+1);
795 /* Two nonempty children */
799 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
804 put_child(t, tn, i/2, (struct node *)newn);
809 for (i = 0; i < olen; i += 2) {
810 struct tnode *newBinNode;
812 left = tnode_get_child(oldtnode, i);
813 right = tnode_get_child(oldtnode, i+1);
815 /* At least one of the children is empty */
817 if (right == NULL) /* Both are empty */
819 put_child(t, tn, i/2, right);
824 put_child(t, tn, i/2, left);
828 /* Two nonempty children */
829 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
830 put_child(t, tn, i/2, NULL);
831 put_child(t, newBinNode, 0, left);
832 put_child(t, newBinNode, 1, right);
833 put_child(t, tn, i/2, resize(t, newBinNode));
835 tnode_free(oldtnode);
839 int size = tnode_child_length(tn);
842 for (j = 0; j < size; j++)
844 tnode_free((struct tnode *)tn->child[j]);
848 return ERR_PTR(-ENOMEM);
852 static void trie_init(struct trie *t)
858 rcu_assign_pointer(t->trie, NULL);
860 #ifdef CONFIG_IP_FIB_TRIE_STATS
861 memset(&t->stats, 0, sizeof(struct trie_use_stats));
865 /* The read side must hold rcu_read_lock; currently the dump routines
866 call this via get_fa_head and dump */
868 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
870 struct hlist_head *head = &l->list;
871 struct hlist_node *node;
872 struct leaf_info *li;
874 hlist_for_each_entry_rcu(li, node, head, hlist)
875 if (li->plen == plen)
881 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
883 struct leaf_info *li = find_leaf_info(l, plen);
891 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
893 struct leaf_info *li = NULL, *last = NULL;
894 struct hlist_node *node;
896 if (hlist_empty(head)) {
897 hlist_add_head_rcu(&new->hlist, head);
899 hlist_for_each_entry(li, node, head, hlist) {
900 if (new->plen > li->plen)
906 hlist_add_after_rcu(&last->hlist, &new->hlist);
908 hlist_add_before_rcu(&new->hlist, &li->hlist);
912 /* rcu_read_lock must be held by the caller on the read side */
915 fib_find_node(struct trie *t, u32 key)
922 n = rcu_dereference(t->trie);
924 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
925 tn = (struct tnode *) n;
929 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
930 pos = tn->pos + tn->bits;
931 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
935 /* Case we have found a leaf. Compare prefixes */
937 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
938 return (struct leaf *)n;
943 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
947 struct tnode *tp = NULL;
951 while (tn != NULL && NODE_PARENT(tn) != NULL) {
953 tp = NODE_PARENT(tn);
954 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
955 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
956 tn = (struct tnode *) resize(t, (struct tnode *)tn);
957 tnode_put_child_reorg((struct tnode *)tp, cindex, (struct node *)tn, wasfull);
959 if (!NODE_PARENT(tn))
962 tn = NODE_PARENT(tn);
964 /* Handle last (top) tnode */
966 tn = (struct tnode*) resize(t, (struct tnode *)tn);
968 return (struct node*) tn;
971 /* only used from updater-side */
973 static struct list_head *
974 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
977 struct tnode *tp = NULL, *tn = NULL;
981 struct list_head *fa_head = NULL;
982 struct leaf_info *li;
988 /* If we point to NULL, stop. Either the tree is empty and we should
989 * just put a new leaf in it, or we have reached an empty child slot,
990 * and we should just put our new leaf in that.
991 * If we point to a T_TNODE, check if it matches our key. Note that
992 * a T_TNODE might be skipping any number of bits - its 'pos' need
993 * not be the parent's 'pos'+'bits'!
995 * If it does match the current key, get pos/bits from it, extract
996 * the index from our key, push the T_TNODE and walk the tree.
998 * If it doesn't, we have to replace it with a new T_TNODE.
1000 * If we point to a T_LEAF, it might or might not have the same key
1001 * as we do. If it does, just change the value, update the T_LEAF's
1002 * value, and return it.
1003 * If it doesn't, we need to replace it with a T_TNODE.
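 * As an illustration of these cases: inserting into a completely empty
 * trie gives n == NULL and tp == NULL, so we fall through to case 3 and
 * build the trie's first 1-bit tnode at position 0, with the new leaf in
 * one child slot and NULL in the other (the "First tnode" branches below).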
1006 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1007 tn = (struct tnode *) n;
1011 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1013 pos = tn->pos + tn->bits;
1014 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1016 BUG_ON(n && NODE_PARENT(n) != tn);
1022 * n ----> NULL, LEAF or TNODE
1024 * tp is n's (parent) ----> NULL or TNODE
1027 BUG_ON(tp && IS_LEAF(tp));
1029 /* Case 1: n is a leaf. Compare prefixes */
1031 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1032 struct leaf *l = (struct leaf *) n;
1034 li = leaf_info_new(plen);
1041 fa_head = &li->falh;
1042 insert_leaf_info(&l->list, li);
1054 li = leaf_info_new(plen);
1057 tnode_free((struct tnode *) l);
1062 fa_head = &li->falh;
1063 insert_leaf_info(&l->list, li);
1065 if (t->trie && n == NULL) {
1066 /* Case 2: n is NULL, and will just insert a new leaf */
1068 NODE_SET_PARENT(l, tp);
1070 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1071 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1073 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1075 * Add a new tnode here
1076 * the first tnode needs some special handling
1080 pos = tp->pos+tp->bits;
1085 newpos = tkey_mismatch(key, pos, n->key);
1086 tn = tnode_new(n->key, newpos, 1);
1089 tn = tnode_new(key, newpos, 1); /* First tnode */
1094 tnode_free((struct tnode *) l);
1099 NODE_SET_PARENT(tn, tp);
1101 missbit = tkey_extract_bits(key, newpos, 1);
1102 put_child(t, tn, missbit, (struct node *)l);
1103 put_child(t, tn, 1-missbit, n);
1106 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1107 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1109 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1114 if (tp && tp->pos + tp->bits > 32)
1115 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1116 tp, tp->pos, tp->bits, key, plen);
1118 /* Rebalance the trie */
1120 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1127 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1129 struct trie *t = (struct trie *) tb->tb_data;
1130 struct fib_alias *fa, *new_fa;
1131 struct list_head *fa_head = NULL;
1132 struct fib_info *fi;
1133 int plen = cfg->fc_dst_len;
1134 u8 tos = cfg->fc_tos;
1142 key = ntohl(cfg->fc_dst);
1144 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1146 mask = ntohl(inet_make_mask(plen));
1153 fi = fib_create_info(cfg);
1159 l = fib_find_node(t, key);
1163 fa_head = get_fa_head(l, plen);
1164 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1167 /* Now fa, if non-NULL, points to the first fib alias
1168 * with the same keys [prefix,tos,priority], if such a key already
1169 * exists, or to the node before which we will insert the new one.
1171 * If fa is NULL, we will need to allocate a new one and
1172 * insert it at the head of f.
1174 * If f is NULL, no fib node matched the destination key
1175 * and we need to allocate a new one of those as well.
1178 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1179 struct fib_alias *fa_orig;
1182 if (cfg->fc_nlflags & NLM_F_EXCL)
1185 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1186 struct fib_info *fi_drop;
1190 new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
1194 fi_drop = fa->fa_info;
1195 new_fa->fa_tos = fa->fa_tos;
1196 new_fa->fa_info = fi;
1197 new_fa->fa_type = cfg->fc_type;
1198 new_fa->fa_scope = cfg->fc_scope;
1199 state = fa->fa_state;
1200 new_fa->fa_state &= ~FA_S_ACCESSED;
1202 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1203 alias_free_mem_rcu(fa);
1205 fib_release_info(fi_drop);
1206 if (state & FA_S_ACCESSED)
1211 /* Error if we find a perfect match which
1212 * uses the same scope, type, and nexthop
1216 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1217 if (fa->fa_tos != tos)
1219 if (fa->fa_info->fib_priority != fi->fib_priority)
1221 if (fa->fa_type == cfg->fc_type &&
1222 fa->fa_scope == cfg->fc_scope &&
1223 fa->fa_info == fi) {
1227 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1231 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1235 new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
1239 new_fa->fa_info = fi;
1240 new_fa->fa_tos = tos;
1241 new_fa->fa_type = cfg->fc_type;
1242 new_fa->fa_scope = cfg->fc_scope;
1243 new_fa->fa_state = 0;
1245 * Insert the new entry into the list.
1250 fa_head = fib_insert_node(t, &err, key, plen);
1252 goto out_free_new_fa;
1255 list_add_tail_rcu(&new_fa->fa_list,
1256 (fa ? &fa->fa_list : fa_head));
1259 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1265 kmem_cache_free(fn_alias_kmem, new_fa);
1267 fib_release_info(fi);
1273 /* should be called with rcu_read_lock */
1274 static inline int check_leaf(struct trie *t, struct leaf *l,
1275 t_key key, int *plen, const struct flowi *flp,
1276 struct fib_result *res)
1280 struct leaf_info *li;
1281 struct hlist_head *hhead = &l->list;
1282 struct hlist_node *node;
1284 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1286 mask = inet_make_mask(i);
1287 if (l->key != (key & ntohl(mask)))
1290 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1292 #ifdef CONFIG_IP_FIB_TRIE_STATS
1293 t->stats.semantic_match_passed++;
1297 #ifdef CONFIG_IP_FIB_TRIE_STATS
1298 t->stats.semantic_match_miss++;
1305 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1307 struct trie *t = (struct trie *) tb->tb_data;
1312 t_key key = ntohl(flp->fl4_dst);
1315 int current_prefix_length = KEYLENGTH;
1317 t_key node_prefix, key_prefix, pref_mismatch;
1322 n = rcu_dereference(t->trie);
1326 #ifdef CONFIG_IP_FIB_TRIE_STATS
1332 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1336 pn = (struct tnode *) n;
1344 cindex = tkey_extract_bits(MASK_PFX(key, current_prefix_length), pos, bits);
1346 n = tnode_get_child(pn, cindex);
1349 #ifdef CONFIG_IP_FIB_TRIE_STATS
1350 t->stats.null_node_hit++;
1356 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1364 cn = (struct tnode *)n;
1367 * It's a tnode, and we can do some extra checks here if we
1368 * like, to avoid descending into a dead-end branch.
1369 * This tnode is in the parent's child array at index
1370 * key[p_pos..p_pos+p_bits] but potentially with some bits
1371 * chopped off, so in reality the index may be just a
1372 * subprefix, padded with zero at the end.
1373 * We can also take a look at any skipped bits in this
1374 * tnode - everything up to p_pos is supposed to be ok,
1375 * and the non-chopped bits of the index (see previous
1376 * paragraph) are also guaranteed ok, but the rest is
1377 * considered unknown.
1379 * The skipped bits are key[pos+bits..cn->pos].
1382 /* If current_prefix_length < pos+bits, we are already doing
1383 * actual prefix matching, which means everything from
1384 * pos+(bits-chopped_off) onward must be zero along some
1385 * branch of this subtree - otherwise there is *no* valid
1386 * prefix present. Here we can only check the skipped
1387 * bits. Remember, since we have already indexed into the
1388 * parent's child array, we know that the bits we chopped off
1392 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1394 if (current_prefix_length < pos+bits) {
1395 if (tkey_extract_bits(cn->key, current_prefix_length,
1396 cn->pos - current_prefix_length) != 0 ||
1402 * If chopped_off=0, the index is fully validated and we
1403 * only need to look at the skipped bits for this, the new,
1404 * tnode. What we actually want to do is to find out if
1405 * these skipped bits match our key perfectly, or if we will
1406 * have to count on finding a matching prefix further down,
1407 * because if we do, we would like to have some way of
1408 * verifying the existence of such a prefix at this point.
1411 /* The only thing we can do at this point is to verify that
1412 * any such matching prefix can indeed be a prefix to our
1413 * key, and if the bits in the node we are inspecting that
1414 * do not match our key are not ZERO, this cannot be true.
1415 * Thus, find out where there is a mismatch (before cn->pos)
1416 * and verify that all the mismatching bits are zero in the
1420 /* Note: We aren't very concerned about the piece of the key
1421 * that precedes pn->pos+pn->bits, since these bits have already been
1422 * checked. The bits after cn->pos aren't checked since these are
1423 * by definition "unknown" at this point. Thus, what we want to
1424 * see is if we are about to enter the "prefix matching" state,
1425 * and in that case verify that the skipped bits that will prevail
1426 * throughout this subtree are zero, as they have to be if we are
1427 * to find a matching prefix.
1430 node_prefix = MASK_PFX(cn->key, cn->pos);
1431 key_prefix = MASK_PFX(key, cn->pos);
1432 pref_mismatch = key_prefix^node_prefix;
1435 /* In short: If skipped bits in this node do not match the search
1436 * key, enter the "prefix matching" state directly.
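 * (Illustration with made-up addresses: looking up 10.0.0.0 under a tnode
 * whose key carries 10.64.0.0 in its first cn->pos = 12 bits, the first
 * mismatching bit is bit 9; bits 9..11 of cn->key are binary 100, i.e.
 * non-zero, so no prefix of the search key can live in this subtree and
 * we backtrack. Had those bits of cn->key been zero, we would instead
 * clamp current_prefix_length down to 9, if it was still >= cn->pos, and
 * keep descending.)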
1438 if (pref_mismatch) {
1439 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1441 pref_mismatch = pref_mismatch <<1;
1443 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1445 if (key_prefix != 0)
1448 if (current_prefix_length >= cn->pos)
1449 current_prefix_length = mp;
1452 pn = (struct tnode *)n; /* Descend */
1459 /* Chopping off zero bits does not change the child index (cindex) */
1460 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1463 /* Decrease current_... with bits chopped off */
1464 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1465 current_prefix_length = pn->pos + pn->bits - chopped_off;
1468 * Either we do the actual chop off here, or, if we have
1469 * chopped off all bits in this tnode, walk up to our parent.
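 * A small worked example: if cindex is binary 0110 when we backtrack, the
 * zero-skipping loop above leaves chopped_off == 2 (its lowest set bit),
 * cindex becomes 0100, and the lookup is retried with that shorter index;
 * once chopped_off exceeds pn->bits there is nothing left to chop and we
 * move up to the parent tnode instead.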
1472 if (chopped_off <= pn->bits) {
1473 cindex &= ~(1 << (chopped_off-1));
1475 if (NODE_PARENT(pn) == NULL)
1478 /* Get Child's index */
1479 cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits);
1480 pn = NODE_PARENT(pn);
1483 #ifdef CONFIG_IP_FIB_TRIE_STATS
1484 t->stats.backtrack++;
1496 /* only called from updater side */
1497 static int trie_leaf_remove(struct trie *t, t_key key)
1500 struct tnode *tp = NULL;
1501 struct node *n = t->trie;
1504 pr_debug("entering trie_leaf_remove(%p)\n", n);
1506 /* Note that in the case of skipped bits, those bits are *not* checked!
1507 * When we finish this, we will have NULL or a T_LEAF, and the
1508 * T_LEAF may or may not match our key.
1511 while (n != NULL && IS_TNODE(n)) {
1512 struct tnode *tn = (struct tnode *) n;
1514 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1516 BUG_ON(n && NODE_PARENT(n) != tn);
1518 l = (struct leaf *) n;
1520 if (!n || !tkey_equals(l->key, key))
1525 * Remove the leaf and rebalance the tree
1532 tp = NODE_PARENT(n);
1533 tnode_free((struct tnode *) n);
1536 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1537 put_child(t, (struct tnode *)tp, cindex, NULL);
1538 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1540 rcu_assign_pointer(t->trie, NULL);
1546 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1548 struct trie *t = (struct trie *) tb->tb_data;
1550 int plen = cfg->fc_dst_len;
1551 u8 tos = cfg->fc_tos;
1552 struct fib_alias *fa, *fa_to_delete;
1553 struct list_head *fa_head;
1555 struct leaf_info *li;
1560 key = ntohl(cfg->fc_dst);
1561 mask = ntohl(inet_make_mask(plen));
1567 l = fib_find_node(t, key);
1572 fa_head = get_fa_head(l, plen);
1573 fa = fib_find_alias(fa_head, tos, 0);
1578 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1580 fa_to_delete = NULL;
1581 fa_head = fa->fa_list.prev;
1583 list_for_each_entry(fa, fa_head, fa_list) {
1584 struct fib_info *fi = fa->fa_info;
1586 if (fa->fa_tos != tos)
1589 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1590 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1591 fa->fa_scope == cfg->fc_scope) &&
1592 (!cfg->fc_protocol ||
1593 fi->fib_protocol == cfg->fc_protocol) &&
1594 fib_nh_match(cfg, fi) == 0) {
1604 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1607 l = fib_find_node(t, key);
1608 li = find_leaf_info(l, plen);
1610 list_del_rcu(&fa->fa_list);
1612 if (list_empty(fa_head)) {
1613 hlist_del_rcu(&li->hlist);
1617 if (hlist_empty(&l->list))
1618 trie_leaf_remove(t, key);
1620 if (fa->fa_state & FA_S_ACCESSED)
1623 fib_release_info(fa->fa_info);
1624 alias_free_mem_rcu(fa);
1628 static int trie_flush_list(struct trie *t, struct list_head *head)
1630 struct fib_alias *fa, *fa_node;
1633 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1634 struct fib_info *fi = fa->fa_info;
1636 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1637 list_del_rcu(&fa->fa_list);
1638 fib_release_info(fa->fa_info);
1639 alias_free_mem_rcu(fa);
1646 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1649 struct hlist_head *lih = &l->list;
1650 struct hlist_node *node, *tmp;
1651 struct leaf_info *li = NULL;
1653 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1654 found += trie_flush_list(t, &li->falh);
1656 if (list_empty(&li->falh)) {
1657 hlist_del_rcu(&li->hlist);
1664 /* rcu_read_lock must be held by the caller on the read side */
1666 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1668 struct node *c = (struct node *) thisleaf;
1671 struct node *trie = rcu_dereference(t->trie);
1677 if (IS_LEAF(trie)) /* trie w. just a leaf */
1678 return (struct leaf *) trie;
1680 p = (struct tnode*) trie; /* Start */
1682 p = (struct tnode *) NODE_PARENT(c);
1687 /* Find the next child of the parent */
1689 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1693 last = 1 << p->bits;
1694 for (idx = pos; idx < last ; idx++) {
1695 c = rcu_dereference(p->child[idx]);
1700 /* Descend if tnode */
1701 while (IS_TNODE(c)) {
1702 p = (struct tnode *) c;
1705 /* Find the first (leftmost) non-NULL branch */
1706 if (p && IS_TNODE(p))
1707 while (idx < (1<<p->bits)
1708 && !(c = rcu_dereference(p->child[idx]))) idx++;
1710 /* Done with this tnode? */
1711 if (idx >= (1 << p->bits) || !c)
1714 return (struct leaf *) c;
1717 /* No more children, go up one step */
1718 c = (struct node *) p;
1719 p = (struct tnode *) NODE_PARENT(p);
1721 return NULL; /* Ready. Root of trie */
1724 static int fn_trie_flush(struct fib_table *tb)
1726 struct trie *t = (struct trie *) tb->tb_data;
1727 struct leaf *ll = NULL, *l = NULL;
1732 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1733 found += trie_flush_leaf(t, l);
1735 if (ll && hlist_empty(&ll->list))
1736 trie_leaf_remove(t, ll->key);
1740 if (ll && hlist_empty(&ll->list))
1741 trie_leaf_remove(t, ll->key);
1743 pr_debug("trie_flush found=%d\n", found);
1747 static int trie_last_dflt = -1;
1750 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1752 struct trie *t = (struct trie *) tb->tb_data;
1753 int order, last_idx;
1754 struct fib_info *fi = NULL;
1755 struct fib_info *last_resort;
1756 struct fib_alias *fa = NULL;
1757 struct list_head *fa_head;
1766 l = fib_find_node(t, 0);
1770 fa_head = get_fa_head(l, 0);
1774 if (list_empty(fa_head))
1777 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1778 struct fib_info *next_fi = fa->fa_info;
1780 if (fa->fa_scope != res->scope ||
1781 fa->fa_type != RTN_UNICAST)
1784 if (next_fi->fib_priority > res->fi->fib_priority)
1786 if (!next_fi->fib_nh[0].nh_gw ||
1787 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1789 fa->fa_state |= FA_S_ACCESSED;
1792 if (next_fi != res->fi)
1794 } else if (!fib_detect_death(fi, order, &last_resort,
1795 &last_idx, &trie_last_dflt)) {
1797 fib_info_put(res->fi);
1799 atomic_inc(&fi->fib_clntref);
1800 trie_last_dflt = order;
1806 if (order <= 0 || fi == NULL) {
1807 trie_last_dflt = -1;
1811 if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) {
1813 fib_info_put(res->fi);
1815 atomic_inc(&fi->fib_clntref);
1816 trie_last_dflt = order;
1819 if (last_idx >= 0) {
1821 fib_info_put(res->fi);
1822 res->fi = last_resort;
1824 atomic_inc(&last_resort->fib_clntref);
1826 trie_last_dflt = last_idx;
1831 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1832 struct sk_buff *skb, struct netlink_callback *cb)
1835 struct fib_alias *fa;
1837 __be32 xkey = htonl(key);
1842 /* rcu_read_lock is held by the caller */
1844 list_for_each_entry_rcu(fa, fah, fa_list) {
1849 BUG_ON(!fa->fa_info);
1851 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1860 fa->fa_info, 0) < 0) {
1870 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1871 struct netlink_callback *cb)
1874 struct list_head *fa_head;
1875 struct leaf *l = NULL;
1879 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1883 memset(&cb->args[4], 0,
1884 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1886 fa_head = get_fa_head(l, plen);
1891 if (list_empty(fa_head))
1894 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1903 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1906 struct trie *t = (struct trie *) tb->tb_data;
1911 for (m = 0; m <= 32; m++) {
1915 memset(&cb->args[3], 0,
1916 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1918 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1931 /* Fix more generic FIB names for init later */
1933 #ifdef CONFIG_IP_MULTIPLE_TABLES
1934 struct fib_table * fib_hash_init(u32 id)
1936 struct fib_table * __init fib_hash_init(u32 id)
1939 struct fib_table *tb;
1942 if (fn_alias_kmem == NULL)
1943 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1944 sizeof(struct fib_alias),
1945 0, SLAB_HWCACHE_ALIGN,
1948 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1954 tb->tb_lookup = fn_trie_lookup;
1955 tb->tb_insert = fn_trie_insert;
1956 tb->tb_delete = fn_trie_delete;
1957 tb->tb_flush = fn_trie_flush;
1958 tb->tb_select_default = fn_trie_select_default;
1959 tb->tb_dump = fn_trie_dump;
1960 memset(tb->tb_data, 0, sizeof(struct trie));
1962 t = (struct trie *) tb->tb_data;
1966 if (id == RT_TABLE_LOCAL)
1968 else if (id == RT_TABLE_MAIN)
1971 if (id == RT_TABLE_LOCAL)
1972 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
1977 #ifdef CONFIG_PROC_FS
1978 /* Depth first Trie walk iterator */
1979 struct fib_trie_iter {
1980 struct tnode *tnode;
1986 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
1988 struct tnode *tn = iter->tnode;
1989 unsigned cindex = iter->index;
1992 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1993 iter->tnode, iter->index, iter->depth);
1995 while (cindex < (1<<tn->bits)) {
1996 struct node *n = tnode_get_child(tn, cindex);
2001 iter->index = cindex + 1;
2003 /* push down one level */
2004 iter->tnode = (struct tnode *) n;
2014 /* Current node exhausted, pop back up */
2015 p = NODE_PARENT(tn);
2017 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2027 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2035 n = rcu_dereference(t->trie);
2040 if (n && IS_TNODE(n)) {
2041 iter->tnode = (struct tnode *) n;
2050 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2053 struct fib_trie_iter iter;
2055 memset(s, 0, sizeof(*s));
2058 for (n = fib_trie_get_first(&iter, t); n;
2059 n = fib_trie_get_next(&iter)) {
2062 s->totdepth += iter.depth;
2063 if (iter.depth > s->maxdepth)
2064 s->maxdepth = iter.depth;
2066 const struct tnode *tn = (const struct tnode *) n;
2070 if (tn->bits < MAX_STAT_DEPTH)
2071 s->nodesizes[tn->bits]++;
2073 for (i = 0; i < (1<<tn->bits); i++)
2082 * This outputs /proc/net/fib_triestat
2084 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2086 unsigned i, max, pointers, bytes, avdepth;
2089 avdepth = stat->totdepth*100 / stat->leaves;
2093 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100);
2094 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2096 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2098 bytes = sizeof(struct leaf) * stat->leaves;
2099 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2100 bytes += sizeof(struct tnode) * stat->tnodes;
2102 max = MAX_STAT_DEPTH;
2103 while (max > 0 && stat->nodesizes[max-1] == 0)
2107 for (i = 1; i <= max; i++)
2108 if (stat->nodesizes[i] != 0) {
2109 seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
2110 pointers += (1<<i) * stat->nodesizes[i];
2112 seq_putc(seq, '\n');
2113 seq_printf(seq, "\tPointers: %d\n", pointers);
2115 bytes += sizeof(struct node *) * pointers;
2116 seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
2117 seq_printf(seq, "Total size: %d kB\n", (bytes + 1023) / 1024);
2119 #ifdef CONFIG_IP_FIB_TRIE_STATS
2120 seq_printf(seq, "Counters:\n---------\n");
2121 seq_printf(seq,"gets = %d\n", t->stats.gets);
2122 seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
2123 seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
2124 seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
2125 seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
2126 seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
2128 memset(&(t->stats), 0, sizeof(t->stats));
2130 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2133 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2135 struct trie_stat *stat;
2137 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2141 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2142 sizeof(struct leaf), sizeof(struct tnode));
2145 seq_printf(seq, "Local:\n");
2146 trie_collect_stats(trie_local, stat);
2147 trie_show_stats(seq, stat);
2151 seq_printf(seq, "Main:\n");
2152 trie_collect_stats(trie_main, stat);
2153 trie_show_stats(seq, stat);
2160 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2162 return single_open(file, fib_triestat_seq_show, NULL);
2165 static struct file_operations fib_triestat_fops = {
2166 .owner = THIS_MODULE,
2167 .open = fib_triestat_seq_open,
2169 .llseek = seq_lseek,
2170 .release = single_release,
2173 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2179 for (n = fib_trie_get_first(iter, trie_local);
2180 n; ++idx, n = fib_trie_get_next(iter)) {
2185 for (n = fib_trie_get_first(iter, trie_main);
2186 n; ++idx, n = fib_trie_get_next(iter)) {
2193 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2197 return SEQ_START_TOKEN;
2198 return fib_trie_get_idx(seq->private, *pos - 1);
2201 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2203 struct fib_trie_iter *iter = seq->private;
2207 if (v == SEQ_START_TOKEN)
2208 return fib_trie_get_idx(iter, 0);
2210 v = fib_trie_get_next(iter);
2215 /* continue scan in next trie */
2216 if (iter->trie == trie_local)
2217 return fib_trie_get_first(iter, trie_main);
2222 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2227 static void seq_indent(struct seq_file *seq, int n)
2229 while (n-- > 0) seq_puts(seq, " ");
2232 static inline const char *rtn_scope(enum rt_scope_t s)
2234 static char buf[32];
2237 case RT_SCOPE_UNIVERSE: return "universe";
2238 case RT_SCOPE_SITE: return "site";
2239 case RT_SCOPE_LINK: return "link";
2240 case RT_SCOPE_HOST: return "host";
2241 case RT_SCOPE_NOWHERE: return "nowhere";
2243 snprintf(buf, sizeof(buf), "scope=%d", s);
2248 static const char *rtn_type_names[__RTN_MAX] = {
2249 [RTN_UNSPEC] = "UNSPEC",
2250 [RTN_UNICAST] = "UNICAST",
2251 [RTN_LOCAL] = "LOCAL",
2252 [RTN_BROADCAST] = "BROADCAST",
2253 [RTN_ANYCAST] = "ANYCAST",
2254 [RTN_MULTICAST] = "MULTICAST",
2255 [RTN_BLACKHOLE] = "BLACKHOLE",
2256 [RTN_UNREACHABLE] = "UNREACHABLE",
2257 [RTN_PROHIBIT] = "PROHIBIT",
2258 [RTN_THROW] = "THROW",
2260 [RTN_XRESOLVE] = "XRESOLVE",
2263 static inline const char *rtn_type(unsigned t)
2265 static char buf[32];
2267 if (t < __RTN_MAX && rtn_type_names[t])
2268 return rtn_type_names[t];
2269 snprintf(buf, sizeof(buf), "type %d", t);
2273 /* Pretty print the trie */
2274 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2276 const struct fib_trie_iter *iter = seq->private;
2279 if (v == SEQ_START_TOKEN)
2283 struct tnode *tn = (struct tnode *) n;
2284 __be32 prf = htonl(MASK_PFX(tn->key, tn->pos));
2286 if (!NODE_PARENT(n)) {
2287 if (iter->trie == trie_local)
2288 seq_puts(seq, "<local>:\n");
2290 seq_puts(seq, "<main>:\n");
2292 seq_indent(seq, iter->depth-1);
2293 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2294 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2295 tn->empty_children);
2298 struct leaf *l = (struct leaf *) n;
2300 __be32 val = htonl(l->key);
2302 seq_indent(seq, iter->depth);
2303 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2304 for (i = 32; i >= 0; i--) {
2305 struct leaf_info *li = find_leaf_info(l, i);
2307 struct fib_alias *fa;
2308 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2309 seq_indent(seq, iter->depth+1);
2310 seq_printf(seq, " /%d %s %s", i,
2311 rtn_scope(fa->fa_scope),
2312 rtn_type(fa->fa_type));
2314 seq_printf(seq, "tos =%d\n",
2316 seq_putc(seq, '\n');
2325 static struct seq_operations fib_trie_seq_ops = {
2326 .start = fib_trie_seq_start,
2327 .next = fib_trie_seq_next,
2328 .stop = fib_trie_seq_stop,
2329 .show = fib_trie_seq_show,
2332 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2334 struct seq_file *seq;
2336 struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
2341 rc = seq_open(file, &fib_trie_seq_ops);
2345 seq = file->private_data;
2347 memset(s, 0, sizeof(*s));
2355 static struct file_operations fib_trie_fops = {
2356 .owner = THIS_MODULE,
2357 .open = fib_trie_seq_open,
2359 .llseek = seq_lseek,
2360 .release = seq_release_private,
2363 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2365 static unsigned type2flags[RTN_MAX + 1] = {
2366 [7] = RTF_REJECT, [8] = RTF_REJECT,
2368 unsigned flags = type2flags[type];
2370 if (fi && fi->fib_nh->nh_gw)
2371 flags |= RTF_GATEWAY;
2372 if (mask == htonl(0xFFFFFFFF))
2379 * This outputs /proc/net/route.
2380 * The format of the file is not supposed to be changed
2381 * and needs to be the same as fib_hash output to avoid breaking
2384 static int fib_route_seq_show(struct seq_file *seq, void *v)
2386 const struct fib_trie_iter *iter = seq->private;
2391 if (v == SEQ_START_TOKEN) {
2392 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2393 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2398 if (iter->trie == trie_local)
2403 for (i = 32; i >= 0; i--) {
2404 struct leaf_info *li = find_leaf_info(l, i);
2405 struct fib_alias *fa;
2406 __be32 mask, prefix;
2411 mask = inet_make_mask(li->plen);
2412 prefix = htonl(l->key);
2414 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2415 const struct fib_info *fi = fa->fa_info;
2416 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2418 if (fa->fa_type == RTN_BROADCAST
2419 || fa->fa_type == RTN_MULTICAST)
2423 snprintf(bf, sizeof(bf),
2424 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2425 fi->fib_dev ? fi->fib_dev->name : "*",
2427 fi->fib_nh->nh_gw, flags, 0, 0,
2430 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2434 snprintf(bf, sizeof(bf),
2435 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2436 prefix, 0, flags, 0, 0, 0,
2439 seq_printf(seq, "%-127s\n", bf);
2446 static struct seq_operations fib_route_seq_ops = {
2447 .start = fib_trie_seq_start,
2448 .next = fib_trie_seq_next,
2449 .stop = fib_trie_seq_stop,
2450 .show = fib_route_seq_show,
2453 static int fib_route_seq_open(struct inode *inode, struct file *file)
2455 struct seq_file *seq;
2457 struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
2462 rc = seq_open(file, &fib_route_seq_ops);
2466 seq = file->private_data;
2468 memset(s, 0, sizeof(*s));
2476 static struct file_operations fib_route_fops = {
2477 .owner = THIS_MODULE,
2478 .open = fib_route_seq_open,
2480 .llseek = seq_lseek,
2481 .release = seq_release_private,
2484 int __init fib_proc_init(void)
2486 if (!proc_net_fops_create("fib_trie", S_IRUGO, &fib_trie_fops))
2489 if (!proc_net_fops_create("fib_triestat", S_IRUGO, &fib_triestat_fops))
2492 if (!proc_net_fops_create("route", S_IRUGO, &fib_route_fops))
2498 proc_net_remove("fib_triestat");
2500 proc_net_remove("fib_trie");
2505 void __init fib_proc_exit(void)
2507 proc_net_remove("fib_trie");
2508 proc_net_remove("fib_triestat");
2509 proc_net_remove("route");
2512 #endif /* CONFIG_PROC_FS */