2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
28 * Code from fib_hash has been reused, which includes the following header:
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
35 * IPv4 FIB: lookup engine and maintenance routines.
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
46 #define VERSION "0.404"
48 #include <linux/config.h>
49 #include <asm/uaccess.h>
50 #include <asm/system.h>
51 #include <asm/bitops.h>
52 #include <linux/types.h>
53 #include <linux/kernel.h>
54 #include <linux/sched.h>
56 #include <linux/string.h>
57 #include <linux/socket.h>
58 #include <linux/sockios.h>
59 #include <linux/errno.h>
61 #include <linux/inet.h>
62 #include <linux/netdevice.h>
63 #include <linux/if_arp.h>
64 #include <linux/proc_fs.h>
65 #include <linux/rcupdate.h>
66 #include <linux/skbuff.h>
67 #include <linux/netlink.h>
68 #include <linux/init.h>
69 #include <linux/list.h>
71 #include <net/protocol.h>
72 #include <net/route.h>
75 #include <net/ip_fib.h>
76 #include "fib_lookup.h"
78 #undef CONFIG_IP_FIB_TRIE_STATS
79 #define MAX_CHILDS 16384
81 #define KEYLENGTH (8*sizeof(t_key))
82 #define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l))
83 #define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset))
85 typedef unsigned int t_key;
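/*
 * A worked example of the macros above, assuming the usual 32-bit t_key
 * (so KEYLENGTH == 32); the values are illustrative only:
 *
 *   MASK_PFX(0xC0A80105, 24)  == 0xC0A80100   keep the top 24 bits,
 *                                             zero the rest
 *   TKEY_GET_MASK(8, 4)       == 0x00F00000   four one-bits starting
 *                                             8 bits below the MSB
 */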
89 #define NODE_TYPE_MASK 0x1UL
90 #define NODE_PARENT(node) \
91 ((struct tnode *)rcu_dereference(((node)->parent & ~NODE_TYPE_MASK)))
93 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
95 #define NODE_SET_PARENT(node, ptr) \
96 rcu_assign_pointer((node)->parent, \
97 ((unsigned long)(ptr)) | NODE_TYPE(node))
99 #define IS_TNODE(n) (!(n->parent & T_LEAF))
100 #define IS_LEAF(n) (n->parent & T_LEAF)
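/*
 * The node type is kept in the least significant bit of the parent
 * "pointer" (NODE_TYPE_MASK): nodes are at least word aligned, so that
 * bit is otherwise unused.  NODE_PARENT() strips the tag before the
 * pointer is used, and NODE_SET_PARENT() preserves it when the parent
 * is changed.
 */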
104 unsigned long parent;
109 unsigned long parent;
110 struct hlist_head list;
115 struct hlist_node hlist;
118 struct list_head falh;
123 unsigned long parent;
124 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */
125 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */
126 unsigned short full_children; /* KEYLENGTH bits needed */
127 unsigned short empty_children; /* KEYLENGTH bits needed */
129 struct node *child[0];
132 #ifdef CONFIG_IP_FIB_TRIE_STATS
133 struct trie_use_stats {
135 unsigned int backtrack;
136 unsigned int semantic_match_passed;
137 unsigned int semantic_match_miss;
138 unsigned int null_node_hit;
139 unsigned int resize_node_skipped;
144 unsigned int totdepth;
145 unsigned int maxdepth;
148 unsigned int nullpointers;
149 unsigned int nodesizes[MAX_CHILDS];
154 #ifdef CONFIG_IP_FIB_TRIE_STATS
155 struct trie_use_stats stats;
158 unsigned int revision;
161 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
162 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
163 static struct node *resize(struct trie *t, struct tnode *tn);
164 static struct tnode *inflate(struct trie *t, struct tnode *tn);
165 static struct tnode *halve(struct trie *t, struct tnode *tn);
166 static void tnode_free(struct tnode *tn);
168 static kmem_cache_t *fn_alias_kmem __read_mostly;
169 static struct trie *trie_local = NULL, *trie_main = NULL;
172 /* rcu_read_lock needs to be held by the caller on the read side */
174 static inline struct node *tnode_get_child(struct tnode *tn, int i)
176 BUG_ON(i >= 1 << tn->bits);
178 return rcu_dereference(tn->child[i]);
181 static inline int tnode_child_length(const struct tnode *tn)
183 return 1 << tn->bits;
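/*
 * tkey_extract_bits(a, offset, bits) returns 'bits' bits of 'a', starting
 * 'offset' bits down from the most significant bit.  For example
 * (illustrative values only): tkey_extract_bits(0xC0A80105, 8, 8) == 0xA8,
 * i.e. the second octet of 192.168.1.5.
 */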
186 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
188 if (offset < KEYLENGTH)
189 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
194 static inline int tkey_equals(t_key a, t_key b)
199 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
201 if (bits == 0 || offset >= KEYLENGTH)
203 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
204 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
207 static inline int tkey_mismatch(t_key a, int offset, t_key b)
214 while ((diff << i) >> (KEYLENGTH-1) == 0)
220 To understand this stuff, an understanding of keys and all their bits is
221 necessary. Every node in the trie has a key associated with it, but not
222 all of the bits in that key are significant.
224 Consider a node 'n' and its parent 'tp'.
226 If n is a leaf, every bit in its key is significant. Its presence is
227 necessitated by path compression, since during a tree traversal (when
228 searching for a leaf - unless we are doing an insertion) we will completely
229 ignore all skipped bits we encounter. Thus we need to verify, at the end of
230 a potentially successful search, that we have indeed been walking the correct key path.
233 Note that we can never "miss" the correct key in the tree if present by
234 following the wrong path. Path compression ensures that segments of the key
235 that are the same for all keys with a given prefix are skipped, but the
236 skipped part *is* identical for each node in the subtrie below the skipped
237 bit! fib_insert_node() in this implementation takes care of that - note the
238 call to tkey_sub_equals() in fib_insert_node().
240 If n is an internal node - a 'tnode' here - the various parts of its key
241 have different meanings.
244 _________________________________________________________________
245 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
246 -----------------------------------------------------------------
247 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
249 _________________________________________________________________
250 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
251 -----------------------------------------------------------------
252 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
259 First, let's just ignore the bits that come before the parent tp, that is
260 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
261 not use them for anything.
263 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
264 index into the parent's child array. That is, they will be used to find
265 'n' among tp's children.
267 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits in n's key; they are identical for every key in the subtrie rooted at n.
270 All the bits we have seen so far are significant to the node n. The rest
271 of the bits are really not needed or indeed known in n->key.
273 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
274 n's child array, and will of course be different for each child.
277 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown at this point.
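/*
 * An illustrative sketch (a hypothetical helper, not used by the code in
 * this file) of how the "N" and "S" fields in the picture above map onto
 * tkey_extract_bits(); 'tp' is assumed to be the parent of 'n'.
 */
static inline void tkey_layout_example(const struct tnode *tp,
				       const struct tnode *n)
{
	/* "N": n's index in tp's child array */
	t_key cindex = tkey_extract_bits(n->key, tp->pos, tp->bits);

	/* "S": the bits skipped between tp and n; identical for every
	 * key in the subtrie rooted at n */
	t_key skipped = tkey_extract_bits(n->key, tp->pos + tp->bits,
					  n->pos - (tp->pos + tp->bits));

	pr_debug("cindex=%u skipped=%u\n", cindex, skipped);
}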
282 static inline void check_tnode(const struct tnode *tn)
284 WARN_ON(tn && tn->pos+tn->bits > 32);
287 static int halve_threshold = 25;
288 static int inflate_threshold = 50;
289 static int halve_threshold_root = 15;
290 static int inflate_threshold_root = 25;
293 static void __alias_free_mem(struct rcu_head *head)
295 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
296 kmem_cache_free(fn_alias_kmem, fa);
299 static inline void alias_free_mem_rcu(struct fib_alias *fa)
301 call_rcu(&fa->rcu, __alias_free_mem);
304 static void __leaf_free_rcu(struct rcu_head *head)
306 kfree(container_of(head, struct leaf, rcu));
309 static inline void free_leaf(struct leaf *leaf)
311 call_rcu(&leaf->rcu, __leaf_free_rcu);
314 static void __leaf_info_free_rcu(struct rcu_head *head)
316 kfree(container_of(head, struct leaf_info, rcu));
319 static inline void free_leaf_info(struct leaf_info *leaf)
321 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
324 static struct tnode *tnode_alloc(unsigned int size)
328 if (size <= PAGE_SIZE)
329 return kcalloc(size, 1, GFP_KERNEL);
331 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
335 return page_address(pages);
338 static void __tnode_free_rcu(struct rcu_head *head)
340 struct tnode *tn = container_of(head, struct tnode, rcu);
341 unsigned int size = sizeof(struct tnode) +
342 (1 << tn->bits) * sizeof(struct node *);
344 if (size <= PAGE_SIZE)
347 free_pages((unsigned long)tn, get_order(size));
350 static inline void tnode_free(struct tnode *tn)
352 call_rcu(&tn->rcu, __tnode_free_rcu);
355 static struct leaf *leaf_new(void)
357 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
360 INIT_HLIST_HEAD(&l->list);
365 static struct leaf_info *leaf_info_new(int plen)
367 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
370 INIT_LIST_HEAD(&li->falh);
375 static struct tnode* tnode_new(t_key key, int pos, int bits)
377 int nchildren = 1<<bits;
378 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
379 struct tnode *tn = tnode_alloc(sz);
383 tn->parent = T_TNODE;
387 tn->full_children = 0;
388 tn->empty_children = 1<<bits;
391 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
392 (unsigned int) (sizeof(struct node) * 1<<bits));
397 * Check whether a tnode 'n' is "full", i.e. it is an internal node
398 * and no bits are skipped. See discussion in dyntree paper p. 6
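 *
 * In terms of the key picture near the top of this file: a full child has
 * no skipped "S" bits - its index bits start exactly where the parent's
 * index bits end (n->pos == tn->pos + tn->bits).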
401 static inline int tnode_full(const struct tnode *tn, const struct node *n)
403 if (n == NULL || IS_LEAF(n))
406 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
409 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
411 tnode_put_child_reorg(tn, i, n, -1);
415 * Add a child at position i overwriting the old value.
416 * Update the value of full_children and empty_children.
419 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
421 struct node *chi = tn->child[i];
424 BUG_ON(i >= 1<<tn->bits);
427 /* update emptyChildren */
428 if (n == NULL && chi != NULL)
429 tn->empty_children++;
430 else if (n != NULL && chi == NULL)
431 tn->empty_children--;
433 /* update fullChildren */
435 wasfull = tnode_full(tn, chi);
437 isfull = tnode_full(tn, n);
438 if (wasfull && !isfull)
440 else if (!wasfull && isfull)
444 NODE_SET_PARENT(n, tn);
446 rcu_assign_pointer(tn->child[i], n);
449 static struct node *resize(struct trie *t, struct tnode *tn)
453 struct tnode *old_tn;
454 int inflate_threshold_use;
455 int halve_threshold_use;
460 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
461 tn, inflate_threshold, halve_threshold);
464 if (tn->empty_children == tnode_child_length(tn)) {
469 if (tn->empty_children == tnode_child_length(tn) - 1)
470 for (i = 0; i < tnode_child_length(tn); i++) {
477 /* compress one level */
478 NODE_SET_PARENT(n, NULL);
483 * Double as long as the resulting node has a number of
484 * nonempty children that is above the threshold.
488 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
489 * the Helsinki University of Technology and Matti Tikkanen of Nokia
490 * Telecommunications, page 6:
491 * "A node is doubled if the ratio of non-empty children to all
492 * children in the *doubled* node is at least 'high'."
494 * 'high' in this instance is the variable 'inflate_threshold'. It
495 * is expressed as a percentage, so we multiply it by
496 * tnode_child_length(); and instead of multiplying the right-hand
497 * side by 2 (since the child array will be doubled by inflate())
498 * and the left-hand side by 100 (to handle the percentage), we
499 * simply multiply the left-hand side by 50.
501 * The left-hand side may look a bit weird: tnode_child_length(tn)
502 * - tn->empty_children is of course the number of non-null children
503 * in the current node. tn->full_children is the number of "full"
504 * children, that is non-null tnodes with a skip value of 0.
505 * All of those will be doubled in the resulting inflated tnode, so
506 * we just count them one extra time here.
508 * A clearer way to write this would be:
510 * to_be_doubled = tn->full_children;
511 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
514 * new_child_length = tnode_child_length(tn) * 2;
516 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
518 * if (new_fill_factor >= inflate_threshold)
520 * ...and so on, though it would mess up the while () loop.
523 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
527 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
528 * inflate_threshold * new_child_length
530 * expand not_to_be_doubled and to_be_doubled, and shorten:
531 * 100 * (tnode_child_length(tn) - tn->empty_children +
532 * tn->full_children) >= inflate_threshold * new_child_length
534 * expand new_child_length:
535 * 100 * (tnode_child_length(tn) - tn->empty_children +
536 * tn->full_children) >=
537 * inflate_threshold * tnode_child_length(tn) * 2
540 * 50 * (tn->full_children + tnode_child_length(tn) -
541 * tn->empty_children) >= inflate_threshold *
542 * tnode_child_length(tn)
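 *
 * A worked example (assuming the default inflate_threshold of 50): for a
 * tnode with bits = 4 (tnode_child_length(tn) = 16), empty_children = 4
 * and full_children = 6, the left-hand side is 50 * (6 + 16 - 4) = 900
 * and the right-hand side is 50 * 16 = 800, so the node is inflated.
 * Equivalently, new_fill_factor = 100 * (6 + 2*6) / 32 = 56 >= 50.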
548 /* Keep root node larger */
551 inflate_threshold_use = inflate_threshold_root;
553 inflate_threshold_use = inflate_threshold;
556 while ((tn->full_children > 0 &&
557 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
558 inflate_threshold_use * tnode_child_length(tn))) {
564 #ifdef CONFIG_IP_FIB_TRIE_STATS
565 t->stats.resize_node_skipped++;
574 * Halve as long as the ratio of non-empty children in this
575 * node is below the halve threshold.
579 /* Keep root node larger */
582 halve_threshold_use = halve_threshold_root;
584 halve_threshold_use = halve_threshold;
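/*
 * A worked example (assuming the default halve_threshold of 25): for a
 * tnode with bits = 4, tnode_child_length(tn) = 16 and empty_children = 13,
 * 100 * (16 - 13) = 300 is less than 25 * 16 = 400, so the node is halved.
 */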
587 while (tn->bits > 1 &&
588 100 * (tnode_child_length(tn) - tn->empty_children) <
589 halve_threshold_use * tnode_child_length(tn)) {
595 #ifdef CONFIG_IP_FIB_TRIE_STATS
596 t->stats.resize_node_skipped++;
603 /* Only one child remains */
604 if (tn->empty_children == tnode_child_length(tn) - 1)
605 for (i = 0; i < tnode_child_length(tn); i++) {
612 /* compress one level */
614 NODE_SET_PARENT(n, NULL);
619 return (struct node *) tn;
622 static struct tnode *inflate(struct trie *t, struct tnode *tn)
625 struct tnode *oldtnode = tn;
626 int olen = tnode_child_length(tn);
629 pr_debug("In inflate\n");
631 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
634 return ERR_PTR(-ENOMEM);
637 * Preallocate and store tnodes before the actual work so we
638 * don't get into an inconsistent state if memory allocation
639 * fails. In case of failure we return the old node and the
640 * inflate of the tnode is abandoned.
643 for (i = 0; i < olen; i++) {
644 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
648 inode->pos == oldtnode->pos + oldtnode->bits &&
650 struct tnode *left, *right;
651 t_key m = TKEY_GET_MASK(inode->pos, 1);
653 left = tnode_new(inode->key&(~m), inode->pos + 1,
658 right = tnode_new(inode->key|m, inode->pos + 1,
666 put_child(t, tn, 2*i, (struct node *) left);
667 put_child(t, tn, 2*i+1, (struct node *) right);
671 for (i = 0; i < olen; i++) {
672 struct node *node = tnode_get_child(oldtnode, i);
673 struct tnode *left, *right;
680 /* A leaf or an internal node with skipped bits */
682 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
683 tn->pos + tn->bits - 1) {
684 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
686 put_child(t, tn, 2*i, node);
688 put_child(t, tn, 2*i+1, node);
692 /* An internal node with two children */
693 inode = (struct tnode *) node;
695 if (inode->bits == 1) {
696 put_child(t, tn, 2*i, inode->child[0]);
697 put_child(t, tn, 2*i+1, inode->child[1]);
703 /* An internal node with more than two children */
705 /* We will replace this node 'inode' with two new
706 * ones, 'left' and 'right', each with half of the
707 * original children. The two new nodes will have
708 * a position one bit further down the key and this
709 * means that the "significant" part of their keys
710 * (see the discussion near the top of this file)
711 * will differ by one bit, which will be "0" in
712 * left's key and "1" in right's key. Since we are
713 * moving the key position by one step, the bit that
714 * we are moving away from - the bit at position
715 * (inode->pos) - is the one that will differ between
716 * left and right. So... we synthesize that bit in the two new keys.
718 * The mask 'm' computed in the preallocation loop above is a single
719 * "one" bit at the position (inode->pos)
722 /* Use the old key, but set the new significant bit to zero. */
726 left = (struct tnode *) tnode_get_child(tn, 2*i);
727 put_child(t, tn, 2*i, NULL);
731 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
732 put_child(t, tn, 2*i+1, NULL);
736 size = tnode_child_length(left);
737 for (j = 0; j < size; j++) {
738 put_child(t, left, j, inode->child[j]);
739 put_child(t, right, j, inode->child[j + size]);
741 put_child(t, tn, 2*i, resize(t, left));
742 put_child(t, tn, 2*i+1, resize(t, right));
746 tnode_free(oldtnode);
750 int size = tnode_child_length(tn);
753 for (j = 0; j < size; j++)
755 tnode_free((struct tnode *)tn->child[j]);
759 return ERR_PTR(-ENOMEM);
763 static struct tnode *halve(struct trie *t, struct tnode *tn)
765 struct tnode *oldtnode = tn;
766 struct node *left, *right;
768 int olen = tnode_child_length(tn);
770 pr_debug("In halve\n");
772 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
775 return ERR_PTR(-ENOMEM);
778 * Preallocate and store tnodes before the actual work so we
779 * don't get into an inconsistent state if memory allocation
780 * fails. In case of failure we return the old node and the
781 * halve of the tnode is abandoned.
784 for (i = 0; i < olen; i += 2) {
785 left = tnode_get_child(oldtnode, i);
786 right = tnode_get_child(oldtnode, i+1);
788 /* Two nonempty children */
792 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
797 put_child(t, tn, i/2, (struct node *)newn);
802 for (i = 0; i < olen; i += 2) {
803 struct tnode *newBinNode;
805 left = tnode_get_child(oldtnode, i);
806 right = tnode_get_child(oldtnode, i+1);
808 /* At least one of the children is empty */
810 if (right == NULL) /* Both are empty */
812 put_child(t, tn, i/2, right);
817 put_child(t, tn, i/2, left);
821 /* Two nonempty children */
822 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
823 put_child(t, tn, i/2, NULL);
824 put_child(t, newBinNode, 0, left);
825 put_child(t, newBinNode, 1, right);
826 put_child(t, tn, i/2, resize(t, newBinNode));
828 tnode_free(oldtnode);
832 int size = tnode_child_length(tn);
835 for (j = 0; j < size; j++)
837 tnode_free((struct tnode *)tn->child[j]);
841 return ERR_PTR(-ENOMEM);
845 static void trie_init(struct trie *t)
851 rcu_assign_pointer(t->trie, NULL);
853 #ifdef CONFIG_IP_FIB_TRIE_STATS
854 memset(&t->stats, 0, sizeof(struct trie_use_stats));
858 /* The read side must use rcu_read_lock; currently the dump routines
859 do so, via get_fa_head and dump */
861 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
863 struct hlist_head *head = &l->list;
864 struct hlist_node *node;
865 struct leaf_info *li;
867 hlist_for_each_entry_rcu(li, node, head, hlist)
868 if (li->plen == plen)
874 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
876 struct leaf_info *li = find_leaf_info(l, plen);
884 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
886 struct leaf_info *li = NULL, *last = NULL;
887 struct hlist_node *node;
889 if (hlist_empty(head)) {
890 hlist_add_head_rcu(&new->hlist, head);
892 hlist_for_each_entry(li, node, head, hlist) {
893 if (new->plen > li->plen)
899 hlist_add_after_rcu(&last->hlist, &new->hlist);
901 hlist_add_before_rcu(&new->hlist, &li->hlist);
905 /* rcu_read_lock needs to be held by the caller on the read side */
908 fib_find_node(struct trie *t, u32 key)
915 n = rcu_dereference(t->trie);
917 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
918 tn = (struct tnode *) n;
922 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
923 pos = tn->pos + tn->bits;
924 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
928 /* Case we have found a leaf. Compare prefixes */
930 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
931 return (struct leaf *)n;
936 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
940 struct tnode *tp = NULL;
944 while (tn != NULL && NODE_PARENT(tn) != NULL) {
946 tp = NODE_PARENT(tn);
947 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
948 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
949 tn = (struct tnode *) resize (t, (struct tnode *)tn);
950 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
952 if (!NODE_PARENT(tn))
955 tn = NODE_PARENT(tn);
957 /* Handle last (top) tnode */
959 tn = (struct tnode*) resize(t, (struct tnode *)tn);
961 return (struct node*) tn;
964 /* only used from updater-side */
966 static struct list_head *
967 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
970 struct tnode *tp = NULL, *tn = NULL;
974 struct list_head *fa_head = NULL;
975 struct leaf_info *li;
981 /* If we point to NULL, stop. Either the tree is empty and we should
982 * just put a new leaf in it, or we have reached an empty child slot,
983 * and we should just put our new leaf in that.
984 * If we point to a T_TNODE, check if it matches our key. Note that
985 * a T_TNODE might be skipping any number of bits - its 'pos' need
986 * not be the parent's 'pos'+'bits'!
988 * If it does match the current key, get pos/bits from it, extract
989 * the index from our key, push the T_TNODE and walk the tree.
991 * If it doesn't, we have to replace it with a new T_TNODE.
993 * If we point to a T_LEAF, it might or might not have the same key
994 * as we do. If it does, just update the T_LEAF's value and
995 * return it.
996 * If it doesn't, we need to replace it with a T_TNODE.
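 *
 * For example, inserting 10.0.0.0/16 when 10.0.0.0/8 already exists ends
 * up in Case 1 below: both prefixes share the key 0x0A000000, so the
 * existing leaf is reused and only a new leaf_info (plen = 16) is added
 * to its list.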
999 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1000 tn = (struct tnode *) n;
1004 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1006 pos = tn->pos + tn->bits;
1007 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1009 BUG_ON(n && NODE_PARENT(n) != tn);
1015 * n ----> NULL, LEAF or TNODE
1017 * tp is n's parent ----> NULL or TNODE
1020 BUG_ON(tp && IS_LEAF(tp));
1022 /* Case 1: n is a leaf. Compare prefixes */
1024 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1025 struct leaf *l = (struct leaf *) n;
1027 li = leaf_info_new(plen);
1034 fa_head = &li->falh;
1035 insert_leaf_info(&l->list, li);
1047 li = leaf_info_new(plen);
1050 tnode_free((struct tnode *) l);
1055 fa_head = &li->falh;
1056 insert_leaf_info(&l->list, li);
1058 if (t->trie && n == NULL) {
1059 /* Case 2: n is NULL, and will just insert a new leaf */
1061 NODE_SET_PARENT(l, tp);
1063 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1064 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1066 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1068 * Add a new tnode here;
1069 * the first tnode needs some special handling
1073 pos = tp->pos+tp->bits;
1078 newpos = tkey_mismatch(key, pos, n->key);
1079 tn = tnode_new(n->key, newpos, 1);
1082 tn = tnode_new(key, newpos, 1); /* First tnode */
1087 tnode_free((struct tnode *) l);
1092 NODE_SET_PARENT(tn, tp);
1094 missbit = tkey_extract_bits(key, newpos, 1);
1095 put_child(t, tn, missbit, (struct node *)l);
1096 put_child(t, tn, 1-missbit, n);
1099 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1100 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1102 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1107 if (tp && tp->pos + tp->bits > 32)
1108 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1109 tp, tp->pos, tp->bits, key, plen);
1111 /* Rebalance the trie */
1113 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1121 fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1122 struct nlmsghdr *nlhdr, struct netlink_skb_parms *req)
1124 struct trie *t = (struct trie *) tb->tb_data;
1125 struct fib_alias *fa, *new_fa;
1126 struct list_head *fa_head = NULL;
1127 struct fib_info *fi;
1128 int plen = r->rtm_dst_len;
1129 int type = r->rtm_type;
1130 u8 tos = r->rtm_tos;
1140 memcpy(&key, rta->rta_dst, 4);
1144 pr_debug("Insert table=%d %08x/%d\n", tb->tb_id, key, plen);
1146 mask = ntohl(inet_make_mask(plen));
1153 fi = fib_create_info(r, rta, nlhdr, &err);
1158 l = fib_find_node(t, key);
1162 fa_head = get_fa_head(l, plen);
1163 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1166 /* Now fa, if non-NULL, points to the first fib alias
1167 * with the same keys [prefix,tos,priority], if such a key already
1168 * exists, or to the node before which we will insert the new one.
1170 * If fa is NULL, we will need to allocate a new one and
1171 * insert it at the head of fa_head.
1173 * If fa_head is NULL, no fib node matched the destination key
1174 * and we need to allocate a new one of those as well.
1177 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1178 struct fib_alias *fa_orig;
1181 if (nlhdr->nlmsg_flags & NLM_F_EXCL)
1184 if (nlhdr->nlmsg_flags & NLM_F_REPLACE) {
1185 struct fib_info *fi_drop;
1189 new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
1193 fi_drop = fa->fa_info;
1194 new_fa->fa_tos = fa->fa_tos;
1195 new_fa->fa_info = fi;
1196 new_fa->fa_type = type;
1197 new_fa->fa_scope = r->rtm_scope;
1198 state = fa->fa_state;
1199 new_fa->fa_state &= ~FA_S_ACCESSED;
1201 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1202 alias_free_mem_rcu(fa);
1204 fib_release_info(fi_drop);
1205 if (state & FA_S_ACCESSED)
1210 /* Error if we find a perfect match which
1211 * uses the same scope, type, and nexthop information.
1215 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1216 if (fa->fa_tos != tos)
1218 if (fa->fa_info->fib_priority != fi->fib_priority)
1220 if (fa->fa_type == type &&
1221 fa->fa_scope == r->rtm_scope &&
1222 fa->fa_info == fi) {
1226 if (!(nlhdr->nlmsg_flags & NLM_F_APPEND))
1230 if (!(nlhdr->nlmsg_flags & NLM_F_CREATE))
1234 new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
1238 new_fa->fa_info = fi;
1239 new_fa->fa_tos = tos;
1240 new_fa->fa_type = type;
1241 new_fa->fa_scope = r->rtm_scope;
1242 new_fa->fa_state = 0;
1244 * Insert new entry to the list.
1248 fa_head = fib_insert_node(t, &err, key, plen);
1251 goto out_free_new_fa;
1254 list_add_tail_rcu(&new_fa->fa_list,
1255 (fa ? &fa->fa_list : fa_head));
1258 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, nlhdr, req);
1263 kmem_cache_free(fn_alias_kmem, new_fa);
1265 fib_release_info(fi);
1271 /* should be called with rcu_read_lock */
1272 static inline int check_leaf(struct trie *t, struct leaf *l,
1273 t_key key, int *plen, const struct flowi *flp,
1274 struct fib_result *res)
1278 struct leaf_info *li;
1279 struct hlist_head *hhead = &l->list;
1280 struct hlist_node *node;
1282 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1284 mask = ntohl(inet_make_mask(i));
1285 if (l->key != (key & mask))
1288 if ((err = fib_semantic_match(&li->falh, flp, res, l->key, mask, i)) <= 0) {
1290 #ifdef CONFIG_IP_FIB_TRIE_STATS
1291 t->stats.semantic_match_passed++;
1295 #ifdef CONFIG_IP_FIB_TRIE_STATS
1296 t->stats.semantic_match_miss++;
1303 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1305 struct trie *t = (struct trie *) tb->tb_data;
1310 t_key key = ntohl(flp->fl4_dst);
1313 int current_prefix_length = KEYLENGTH;
1315 t_key node_prefix, key_prefix, pref_mismatch;
1320 n = rcu_dereference(t->trie);
1324 #ifdef CONFIG_IP_FIB_TRIE_STATS
1330 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1334 pn = (struct tnode *) n;
1342 cindex = tkey_extract_bits(MASK_PFX(key, current_prefix_length), pos, bits);
1344 n = tnode_get_child(pn, cindex);
1347 #ifdef CONFIG_IP_FIB_TRIE_STATS
1348 t->stats.null_node_hit++;
1354 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1362 cn = (struct tnode *)n;
1365 * It's a tnode, and we can do some extra checks here if we
1366 * like, to avoid descending into a dead-end branch.
1367 * This tnode is in the parent's child array at index
1368 * key[p_pos..p_pos+p_bits] but potentially with some bits
1369 * chopped off, so in reality the index may be just a
1370 * subprefix, padded with zero at the end.
1371 * We can also take a look at any skipped bits in this
1372 * tnode - everything up to p_pos is supposed to be ok,
1373 * and the non-chopped bits of the index (see previous
1374 * paragraph) are also guaranteed ok, but the rest is
1375 * considered unknown.
1377 * The skipped bits are key[pos+bits..cn->pos].
1380 /* If current_prefix_length < pos+bits, we are already doing
1381 * actual prefix matching, which means everything from
1382 * pos+(bits-chopped_off) onward must be zero along some
1383 * branch of this subtree - otherwise there is *no* valid
1384 * prefix present. Here we can only check the skipped
1385 * bits. Remember, since we have already indexed into the
1386 * parent's child array, we know that the bits we chopped off *are* zero.
1390 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1392 if (current_prefix_length < pos+bits) {
1393 if (tkey_extract_bits(cn->key, current_prefix_length,
1394 cn->pos - current_prefix_length) != 0 ||
1400 * If chopped_off=0, the index is fully validated and we
1401 * only need to look at the skipped bits for this, the new,
1402 * tnode. What we actually want to do is to find out if
1403 * these skipped bits match our key perfectly, or if we will
1404 * have to count on finding a matching prefix further down,
1405 * because if we do, we would like to have some way of
1406 * verifying the existence of such a prefix at this point.
1409 /* The only thing we can do at this point is to verify that
1410 * any such matching prefix can indeed be a prefix to our
1411 * key, and if the bits in the node we are inspecting that
1412 * do not match our key are not ZERO, this cannot be true.
1413 * Thus, find out where there is a mismatch (before cn->pos)
1414 * and verify that all the mismatching bits are zero in the
1418 /* Note: We aren't very concerned about the piece of the key
1419 * that precedes pn->pos+pn->bits, since these bits have already been
1420 * checked. The bits after cn->pos aren't checked since these are
1421 * by definition "unknown" at this point. Thus, what we want to
1422 * see is if we are about to enter the "prefix matching" state,
1423 * and in that case verify that the skipped bits that will prevail
1424 * throughout this subtree are zero, as they have to be if we are
1425 * to find a matching prefix.
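 *
 * A concrete sketch of the check below (illustrative numbers only):
 * suppose cn->pos == 26 and the masked keys first differ at bit mp == 20
 * (counted from the most significant bit). If any of cn->key's bits in
 * positions 20..25 are non-zero, no route stored in this subtrie can be
 * short enough to still match our key, so we backtrack; otherwise we
 * descend, lowering current_prefix_length to mp if it was >= cn->pos.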
1428 node_prefix = MASK_PFX(cn->key, cn->pos);
1429 key_prefix = MASK_PFX(key, cn->pos);
1430 pref_mismatch = key_prefix^node_prefix;
1433 /* In short: If skipped bits in this node do not match the search
1434 * key, enter the "prefix matching" state directly.
1436 if (pref_mismatch) {
1437 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1439 pref_mismatch = pref_mismatch <<1;
1441 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1443 if (key_prefix != 0)
1446 if (current_prefix_length >= cn->pos)
1447 current_prefix_length = mp;
1450 pn = (struct tnode *)n; /* Descend */
1457 /* As zeros don't change the child key (cindex), skip over them */
1458 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1461 /* Decrease current_... with bits chopped off */
1462 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1463 current_prefix_length = pn->pos + pn->bits - chopped_off;
1466 * Either we do the actual chop-off, or, if we have
1467 * chopped off all bits in this tnode, walk up to our parent.
1470 if (chopped_off <= pn->bits) {
1471 cindex &= ~(1 << (chopped_off-1));
1473 if (NODE_PARENT(pn) == NULL)
1476 /* Get Child's index */
1477 cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits);
1478 pn = NODE_PARENT(pn);
1481 #ifdef CONFIG_IP_FIB_TRIE_STATS
1482 t->stats.backtrack++;
1494 /* only called from updater side */
1495 static int trie_leaf_remove(struct trie *t, t_key key)
1498 struct tnode *tp = NULL;
1499 struct node *n = t->trie;
1502 pr_debug("entering trie_leaf_remove(%p)\n", n);
1504 /* Note that in the case of skipped bits, those bits are *not* checked!
1505 * When we finish this, we will have NULL or a T_LEAF, and the
1506 * T_LEAF may or may not match our key.
1509 while (n != NULL && IS_TNODE(n)) {
1510 struct tnode *tn = (struct tnode *) n;
1512 n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits));
1514 BUG_ON(n && NODE_PARENT(n) != tn);
1516 l = (struct leaf *) n;
1518 if (!n || !tkey_equals(l->key, key))
1523 * Remove the leaf and rebalance the tree
1530 tp = NODE_PARENT(n);
1531 tnode_free((struct tnode *) n);
1534 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1535 put_child(t, (struct tnode *)tp, cindex, NULL);
1536 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1538 rcu_assign_pointer(t->trie, NULL);
1545 fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
1546 struct nlmsghdr *nlhdr, struct netlink_skb_parms *req)
1548 struct trie *t = (struct trie *) tb->tb_data;
1550 int plen = r->rtm_dst_len;
1551 u8 tos = r->rtm_tos;
1552 struct fib_alias *fa, *fa_to_delete;
1553 struct list_head *fa_head;
1555 struct leaf_info *li;
1563 memcpy(&key, rta->rta_dst, 4);
1566 mask = ntohl(inet_make_mask(plen));
1572 l = fib_find_node(t, key);
1577 fa_head = get_fa_head(l, plen);
1578 fa = fib_find_alias(fa_head, tos, 0);
1583 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1585 fa_to_delete = NULL;
1586 fa_head = fa->fa_list.prev;
1588 list_for_each_entry(fa, fa_head, fa_list) {
1589 struct fib_info *fi = fa->fa_info;
1591 if (fa->fa_tos != tos)
1594 if ((!r->rtm_type ||
1595 fa->fa_type == r->rtm_type) &&
1596 (r->rtm_scope == RT_SCOPE_NOWHERE ||
1597 fa->fa_scope == r->rtm_scope) &&
1598 (!r->rtm_protocol ||
1599 fi->fib_protocol == r->rtm_protocol) &&
1600 fib_nh_match(r, nlhdr, rta, fi) == 0) {
1610 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req);
1612 l = fib_find_node(t, key);
1613 li = find_leaf_info(l, plen);
1615 list_del_rcu(&fa->fa_list);
1617 if (list_empty(fa_head)) {
1618 hlist_del_rcu(&li->hlist);
1622 if (hlist_empty(&l->list))
1623 trie_leaf_remove(t, key);
1625 if (fa->fa_state & FA_S_ACCESSED)
1628 fib_release_info(fa->fa_info);
1629 alias_free_mem_rcu(fa);
1633 static int trie_flush_list(struct trie *t, struct list_head *head)
1635 struct fib_alias *fa, *fa_node;
1638 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1639 struct fib_info *fi = fa->fa_info;
1641 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1642 list_del_rcu(&fa->fa_list);
1643 fib_release_info(fa->fa_info);
1644 alias_free_mem_rcu(fa);
1651 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1654 struct hlist_head *lih = &l->list;
1655 struct hlist_node *node, *tmp;
1656 struct leaf_info *li = NULL;
1658 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1659 found += trie_flush_list(t, &li->falh);
1661 if (list_empty(&li->falh)) {
1662 hlist_del_rcu(&li->hlist);
1669 /* rcu_read_lock needs to be held by the caller on the read side */
1671 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1673 struct node *c = (struct node *) thisleaf;
1676 struct node *trie = rcu_dereference(t->trie);
1682 if (IS_LEAF(trie)) /* trie w. just a leaf */
1683 return (struct leaf *) trie;
1685 p = (struct tnode*) trie; /* Start */
1687 p = (struct tnode *) NODE_PARENT(c);
1692 /* Find the next child of the parent */
1694 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1698 last = 1 << p->bits;
1699 for (idx = pos; idx < last ; idx++) {
1700 c = rcu_dereference(p->child[idx]);
1705 /* Descend if tnode */
1706 while (IS_TNODE(c)) {
1707 p = (struct tnode *) c;
1710 /* First (lowest-index) non-NULL branch */
1711 if (p && IS_TNODE(p))
1712 while (!(c = rcu_dereference(p->child[idx]))
1713 && idx < (1<<p->bits)) idx++;
1715 /* Done with this tnode? */
1716 if (idx >= (1 << p->bits) || !c)
1719 return (struct leaf *) c;
1722 /* No more children - go up one step */
1723 c = (struct node *) p;
1724 p = (struct tnode *) NODE_PARENT(p);
1726 return NULL; /* Ready. Root of trie */
1729 static int fn_trie_flush(struct fib_table *tb)
1731 struct trie *t = (struct trie *) tb->tb_data;
1732 struct leaf *ll = NULL, *l = NULL;
1737 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1738 found += trie_flush_leaf(t, l);
1740 if (ll && hlist_empty(&ll->list))
1741 trie_leaf_remove(t, ll->key);
1745 if (ll && hlist_empty(&ll->list))
1746 trie_leaf_remove(t, ll->key);
1748 pr_debug("trie_flush found=%d\n", found);
1752 static int trie_last_dflt = -1;
1755 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1757 struct trie *t = (struct trie *) tb->tb_data;
1758 int order, last_idx;
1759 struct fib_info *fi = NULL;
1760 struct fib_info *last_resort;
1761 struct fib_alias *fa = NULL;
1762 struct list_head *fa_head;
1771 l = fib_find_node(t, 0);
1775 fa_head = get_fa_head(l, 0);
1779 if (list_empty(fa_head))
1782 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1783 struct fib_info *next_fi = fa->fa_info;
1785 if (fa->fa_scope != res->scope ||
1786 fa->fa_type != RTN_UNICAST)
1789 if (next_fi->fib_priority > res->fi->fib_priority)
1791 if (!next_fi->fib_nh[0].nh_gw ||
1792 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1794 fa->fa_state |= FA_S_ACCESSED;
1797 if (next_fi != res->fi)
1799 } else if (!fib_detect_death(fi, order, &last_resort,
1800 &last_idx, &trie_last_dflt)) {
1802 fib_info_put(res->fi);
1804 atomic_inc(&fi->fib_clntref);
1805 trie_last_dflt = order;
1811 if (order <= 0 || fi == NULL) {
1812 trie_last_dflt = -1;
1816 if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) {
1818 fib_info_put(res->fi);
1820 atomic_inc(&fi->fib_clntref);
1821 trie_last_dflt = order;
1824 if (last_idx >= 0) {
1826 fib_info_put(res->fi);
1827 res->fi = last_resort;
1829 atomic_inc(&last_resort->fib_clntref);
1831 trie_last_dflt = last_idx;
1836 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1837 struct sk_buff *skb, struct netlink_callback *cb)
1840 struct fib_alias *fa;
1842 u32 xkey = htonl(key);
1847 /* rcu_read_lock is held by the caller */
1849 list_for_each_entry_rcu(fa, fah, fa_list) {
1854 BUG_ON(!fa->fa_info);
1856 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1865 fa->fa_info, 0) < 0) {
1875 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1876 struct netlink_callback *cb)
1879 struct list_head *fa_head;
1880 struct leaf *l = NULL;
1884 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1888 memset(&cb->args[3], 0,
1889 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1891 fa_head = get_fa_head(l, plen);
1896 if (list_empty(fa_head))
1899 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1908 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1911 struct trie *t = (struct trie *) tb->tb_data;
1916 for (m = 0; m <= 32; m++) {
1920 memset(&cb->args[2], 0,
1921 sizeof(cb->args) - 2*sizeof(cb->args[0]));
1923 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1936 /* Fix more generic FIB names for init later */
1938 #ifdef CONFIG_IP_MULTIPLE_TABLES
1939 struct fib_table * fib_hash_init(int id)
1941 struct fib_table * __init fib_hash_init(int id)
1944 struct fib_table *tb;
1947 if (fn_alias_kmem == NULL)
1948 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1949 sizeof(struct fib_alias),
1950 0, SLAB_HWCACHE_ALIGN,
1953 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1959 tb->tb_lookup = fn_trie_lookup;
1960 tb->tb_insert = fn_trie_insert;
1961 tb->tb_delete = fn_trie_delete;
1962 tb->tb_flush = fn_trie_flush;
1963 tb->tb_select_default = fn_trie_select_default;
1964 tb->tb_dump = fn_trie_dump;
1965 memset(tb->tb_data, 0, sizeof(struct trie));
1967 t = (struct trie *) tb->tb_data;
1971 if (id == RT_TABLE_LOCAL)
1973 else if (id == RT_TABLE_MAIN)
1976 if (id == RT_TABLE_LOCAL)
1977 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
1982 #ifdef CONFIG_PROC_FS
1983 /* Depth first Trie walk iterator */
1984 struct fib_trie_iter {
1985 struct tnode *tnode;
1991 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
1993 struct tnode *tn = iter->tnode;
1994 unsigned cindex = iter->index;
1997 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1998 iter->tnode, iter->index, iter->depth);
2000 while (cindex < (1<<tn->bits)) {
2001 struct node *n = tnode_get_child(tn, cindex);
2006 iter->index = cindex + 1;
2008 /* push down one level */
2009 iter->tnode = (struct tnode *) n;
2019 /* Current node exhausted, pop back up */
2020 p = NODE_PARENT(tn);
2022 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2032 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2035 struct node *n = rcu_dereference(t->trie);
2037 if (n && IS_TNODE(n)) {
2038 iter->tnode = (struct tnode *) n;
2047 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2050 struct fib_trie_iter iter;
2052 memset(s, 0, sizeof(*s));
2055 for (n = fib_trie_get_first(&iter, t); n;
2056 n = fib_trie_get_next(&iter)) {
2059 s->totdepth += iter.depth;
2060 if (iter.depth > s->maxdepth)
2061 s->maxdepth = iter.depth;
2063 const struct tnode *tn = (const struct tnode *) n;
2067 s->nodesizes[tn->bits]++;
2068 for (i = 0; i < (1<<tn->bits); i++)
2077 * This outputs /proc/net/fib_triestat
2079 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2081 unsigned i, max, pointers, bytes, avdepth;
2084 avdepth = stat->totdepth*100 / stat->leaves;
2088 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100 );
2089 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2091 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2093 bytes = sizeof(struct leaf) * stat->leaves;
2094 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2095 bytes += sizeof(struct tnode) * stat->tnodes;
2098 while (max >= 0 && stat->nodesizes[max] == 0)
2102 for (i = 1; i <= max; i++)
2103 if (stat->nodesizes[i] != 0) {
2104 seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
2105 pointers += (1<<i) * stat->nodesizes[i];
2107 seq_putc(seq, '\n');
2108 seq_printf(seq, "\tPointers: %d\n", pointers);
2110 bytes += sizeof(struct node *) * pointers;
2111 seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
2112 seq_printf(seq, "Total size: %d kB\n", (bytes + 1023) / 1024);
2114 #ifdef CONFIG_IP_FIB_TRIE_STATS
2115 seq_printf(seq, "Counters:\n---------\n");
2116 seq_printf(seq,"gets = %d\n", t->stats.gets);
2117 seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
2118 seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
2119 seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
2120 seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
2121 seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
2123 memset(&(t->stats), 0, sizeof(t->stats));
2125 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2128 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2130 struct trie_stat *stat;
2132 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2136 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2137 sizeof(struct leaf), sizeof(struct tnode));
2140 seq_printf(seq, "Local:\n");
2141 trie_collect_stats(trie_local, stat);
2142 trie_show_stats(seq, stat);
2146 seq_printf(seq, "Main:\n");
2147 trie_collect_stats(trie_main, stat);
2148 trie_show_stats(seq, stat);
2155 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2157 return single_open(file, fib_triestat_seq_show, NULL);
2160 static struct file_operations fib_triestat_fops = {
2161 .owner = THIS_MODULE,
2162 .open = fib_triestat_seq_open,
2164 .llseek = seq_lseek,
2165 .release = single_release,
2168 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2174 for (n = fib_trie_get_first(iter, trie_local);
2175 n; ++idx, n = fib_trie_get_next(iter)) {
2180 for (n = fib_trie_get_first(iter, trie_main);
2181 n; ++idx, n = fib_trie_get_next(iter)) {
2188 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2192 return SEQ_START_TOKEN;
2193 return fib_trie_get_idx(seq->private, *pos - 1);
2196 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2198 struct fib_trie_iter *iter = seq->private;
2202 if (v == SEQ_START_TOKEN)
2203 return fib_trie_get_idx(iter, 0);
2205 v = fib_trie_get_next(iter);
2210 /* continue scan in next trie */
2211 if (iter->trie == trie_local)
2212 return fib_trie_get_first(iter, trie_main);
2217 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2222 static void seq_indent(struct seq_file *seq, int n)
2224 while (n-- > 0) seq_puts(seq, " ");
2227 static inline const char *rtn_scope(enum rt_scope_t s)
2229 static char buf[32];
2232 case RT_SCOPE_UNIVERSE: return "universe";
2233 case RT_SCOPE_SITE: return "site";
2234 case RT_SCOPE_LINK: return "link";
2235 case RT_SCOPE_HOST: return "host";
2236 case RT_SCOPE_NOWHERE: return "nowhere";
2238 snprintf(buf, sizeof(buf), "scope=%d", s);
2243 static const char *rtn_type_names[__RTN_MAX] = {
2244 [RTN_UNSPEC] = "UNSPEC",
2245 [RTN_UNICAST] = "UNICAST",
2246 [RTN_LOCAL] = "LOCAL",
2247 [RTN_BROADCAST] = "BROADCAST",
2248 [RTN_ANYCAST] = "ANYCAST",
2249 [RTN_MULTICAST] = "MULTICAST",
2250 [RTN_BLACKHOLE] = "BLACKHOLE",
2251 [RTN_UNREACHABLE] = "UNREACHABLE",
2252 [RTN_PROHIBIT] = "PROHIBIT",
2253 [RTN_THROW] = "THROW",
2255 [RTN_XRESOLVE] = "XRESOLVE",
2258 static inline const char *rtn_type(unsigned t)
2260 static char buf[32];
2262 if (t < __RTN_MAX && rtn_type_names[t])
2263 return rtn_type_names[t];
2264 snprintf(buf, sizeof(buf), "type %d", t);
2268 /* Pretty print the trie */
2269 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2271 const struct fib_trie_iter *iter = seq->private;
2274 if (v == SEQ_START_TOKEN)
2278 struct tnode *tn = (struct tnode *) n;
2279 t_key prf = ntohl(MASK_PFX(tn->key, tn->pos));
2281 if (!NODE_PARENT(n)) {
2282 if (iter->trie == trie_local)
2283 seq_puts(seq, "<local>:\n");
2285 seq_puts(seq, "<main>:\n");
2287 seq_indent(seq, iter->depth-1);
2288 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2289 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2290 tn->empty_children);
2293 struct leaf *l = (struct leaf *) n;
2295 u32 val = ntohl(l->key);
2297 seq_indent(seq, iter->depth);
2298 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2299 for (i = 32; i >= 0; i--) {
2300 struct leaf_info *li = find_leaf_info(l, i);
2302 struct fib_alias *fa;
2303 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2304 seq_indent(seq, iter->depth+1);
2305 seq_printf(seq, " /%d %s %s", i,
2306 rtn_scope(fa->fa_scope),
2307 rtn_type(fa->fa_type));
2309 seq_printf(seq, "tos =%d\n",
2311 seq_putc(seq, '\n');
2320 static struct seq_operations fib_trie_seq_ops = {
2321 .start = fib_trie_seq_start,
2322 .next = fib_trie_seq_next,
2323 .stop = fib_trie_seq_stop,
2324 .show = fib_trie_seq_show,
2327 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2329 struct seq_file *seq;
2331 struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
2336 rc = seq_open(file, &fib_trie_seq_ops);
2340 seq = file->private_data;
2342 memset(s, 0, sizeof(*s));
2350 static struct file_operations fib_trie_fops = {
2351 .owner = THIS_MODULE,
2352 .open = fib_trie_seq_open,
2354 .llseek = seq_lseek,
2355 .release = seq_release_private,
2358 static unsigned fib_flag_trans(int type, u32 mask, const struct fib_info *fi)
2360 static unsigned type2flags[RTN_MAX + 1] = {
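/* 7 == RTN_UNREACHABLE, 8 == RTN_PROHIBIT: both are reported as reject routes */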
2361 [7] = RTF_REJECT, [8] = RTF_REJECT,
2363 unsigned flags = type2flags[type];
2365 if (fi && fi->fib_nh->nh_gw)
2366 flags |= RTF_GATEWAY;
2367 if (mask == 0xFFFFFFFF)
2374 * This outputs /proc/net/route.
2375 * The format of the file is not supposed to be changed
2376 * and needs to be the same as the fib_hash output to avoid breaking existing user-space tools that parse it.
2379 static int fib_route_seq_show(struct seq_file *seq, void *v)
2381 const struct fib_trie_iter *iter = seq->private;
2386 if (v == SEQ_START_TOKEN) {
2387 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2388 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2393 if (iter->trie == trie_local)
2398 for (i=32; i>=0; i--) {
2399 struct leaf_info *li = find_leaf_info(l, i);
2400 struct fib_alias *fa;
2406 mask = inet_make_mask(li->plen);
2407 prefix = htonl(l->key);
2409 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2410 const struct fib_info *fi = fa->fa_info;
2411 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2413 if (fa->fa_type == RTN_BROADCAST
2414 || fa->fa_type == RTN_MULTICAST)
2418 snprintf(bf, sizeof(bf),
2419 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2420 fi->fib_dev ? fi->fib_dev->name : "*",
2422 fi->fib_nh->nh_gw, flags, 0, 0,
2425 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2429 snprintf(bf, sizeof(bf),
2430 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2431 prefix, 0, flags, 0, 0, 0,
2434 seq_printf(seq, "%-127s\n", bf);
2441 static struct seq_operations fib_route_seq_ops = {
2442 .start = fib_trie_seq_start,
2443 .next = fib_trie_seq_next,
2444 .stop = fib_trie_seq_stop,
2445 .show = fib_route_seq_show,
2448 static int fib_route_seq_open(struct inode *inode, struct file *file)
2450 struct seq_file *seq;
2452 struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);
2457 rc = seq_open(file, &fib_route_seq_ops);
2461 seq = file->private_data;
2463 memset(s, 0, sizeof(*s));
2471 static struct file_operations fib_route_fops = {
2472 .owner = THIS_MODULE,
2473 .open = fib_route_seq_open,
2475 .llseek = seq_lseek,
2476 .release = seq_release_private,
2479 int __init fib_proc_init(void)
2481 if (!proc_net_fops_create("fib_trie", S_IRUGO, &fib_trie_fops))
2484 if (!proc_net_fops_create("fib_triestat", S_IRUGO, &fib_triestat_fops))
2487 if (!proc_net_fops_create("route", S_IRUGO, &fib_route_fops))
2493 proc_net_remove("fib_triestat");
2495 proc_net_remove("fib_trie");
2500 void __init fib_proc_exit(void)
2502 proc_net_remove("fib_trie");
2503 proc_net_remove("fib_triestat");
2504 proc_net_remove("route");
2507 #endif /* CONFIG_PROC_FS */