/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Forwarding Information Base (Routing Tables)
 *
 * Author:      Steve Whitehouse <SteveW@ACM.org>
 *              Mostly copied from the IPv4 routing code
 *
 */
#include <linux/string.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
#include <net/netlink.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
struct dn_zone
{
	struct dn_zone		*dz_next;
	struct dn_fib_node	**dz_hash;
	int			dz_nent;
	int			dz_divisor;
	u32			dz_hashmask;
#define DZ_HASHMASK(dz)	((dz)->dz_hashmask)
	int			dz_order;
	__le16			dz_mask;
#define DZ_MASK(dz)	((dz)->dz_mask)
};

struct dn_hash
{
	struct dn_zone	*dh_zones[17];
	struct dn_zone	*dh_zone_list;
};
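
/*
 * A dn_hash table keeps one dn_zone per prefix length (0..16). Each
 * zone is a chained hash of dn_fib_node entries keyed by the masked
 * destination address; dh_zone_list links the zones from the most
 * specific prefix down to the least specific.
 */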
#define dz_key_0(key)		((key).datum = 0)
#define dz_prefix(key,dz)	((key).datum)
#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define endfor_nexthops(fi) }
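
/*
 * for_nexthops()/endfor_nexthops() bracket a walk over the nexthops of
 * a dn_fib_info; the opening macro starts a block, so a typical use is:
 *
 *	for_nexthops(fi) {
 *		if (nh->nh_oif == oif)
 *			...;
 *	} endfor_nexthops(fi);
 */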
#define DN_MAX_DIVISOR 1024
#define DN_S_ZOMBIE 1
#define DN_S_ACCESSED 2
#define DN_FIB_SCAN(f, fp) \
for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)

#define DN_FIB_SCAN_KEY(f, fp, key) \
for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
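
/*
 * The scan macros deliberately advance fp, not just f, so that when the
 * loop terminates fp points at the link where a new node can be spliced
 * in (see dn_fib_table_insert below).
 */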
#define RT_TABLE_MIN 1
#define DN_FIB_TABLE_HASHSZ 256
static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
static DEFINE_RWLOCK(dn_fib_tables_lock);

static struct kmem_cache *dn_hash_kmem __read_mostly;
static int dn_fib_hash_zombies;
static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
{
	u16 h = dn_ntohs(key.datum)>>(16 - dz->dz_order);
	h ^= (h >> 10);
	h ^= (h >> 6);
	h &= DZ_HASHMASK(dz);
	return *(dn_fib_idx_t *)&h;
}
static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz)
{
	dn_fib_key_t k;
	k.datum = dst & DZ_MASK(dz);
	return k;
}
static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
{
	return &dz->dz_hash[dn_hash(key, dz).datum];
}

static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
{
	return dz->dz_hash[dn_hash(key, dz).datum];
}

static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
{
	return a.datum == b.datum;
}

static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
{
	return a.datum <= b.datum;
}
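
/*
 * Hash chains are kept sorted by dn_key_leq() ordering (a raw compare
 * of the little-endian key words). The exact order does not matter;
 * what matters is that it is consistent, so scans can stop early and
 * equal keys stay adjacent.
 */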
static inline void dn_rebuild_zone(struct dn_zone *dz,
				   struct dn_fib_node **old_ht,
				   int old_divisor)
{
	int i;
	struct dn_fib_node *f, **fp, *next;

	for(i = 0; i < old_divisor; i++) {
		for(f = old_ht[i]; f; f = next) {
			/* Save the successor before relinking f into the
			 * new table, otherwise we would walk the new chain.
			 */
			next = f->fn_next;
			for(fp = dn_chain_p(f->fn_key, dz);
			    *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
			    fp = &(*fp)->fn_next)
				/* NOTHING */;
			f->fn_next = *fp;
			*fp = f;
		}
	}
}
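
/*
 * Zones start with 16 buckets (1 for the zero-length prefix) and are
 * grown in two steps, 16 -> 256 -> 1024 (DN_MAX_DIVISOR), once the
 * entry count exceeds four times the bucket count.
 */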
static void dn_rehash_zone(struct dn_zone *dz)
{
	struct dn_fib_node **ht, **old_ht;
	int old_divisor, new_divisor;
	u32 new_hashmask;

	old_divisor = dz->dz_divisor;

	switch(old_divisor) {
		case 16:
			new_divisor = 256;
			new_hashmask = 0xFF;
			break;
		default:
			printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n", old_divisor);
		case 256:
			new_divisor = 1024;
			new_hashmask = 0x3FF;
			break;
	}

	ht = kcalloc(new_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
	if (ht == NULL)
		return;

	write_lock_bh(&dn_fib_tables_lock);
	old_ht = dz->dz_hash;
	dz->dz_hash = ht;
	dz->dz_hashmask = new_hashmask;
	dz->dz_divisor = new_divisor;
	dn_rebuild_zone(dz, old_ht, old_divisor);
	write_unlock_bh(&dn_fib_tables_lock);
	kfree(old_ht);
}
static void dn_free_node(struct dn_fib_node *f)
{
	dn_fib_release_info(DN_FIB_INFO(f));
	kmem_cache_free(dn_hash_kmem, f);
}
static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
{
	int i;
	struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
	if (!dz)
		return NULL;

	if (z) {
		dz->dz_divisor = 16;
		dz->dz_hashmask = 0x0F;
	} else {
		dz->dz_divisor = 1;
		dz->dz_hashmask = 0;
	}

	dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
	if (!dz->dz_hash) {
		kfree(dz);
		return NULL;
	}

	dz->dz_order = z;
	dz->dz_mask = dnet_make_mask(z);

	for(i = z + 1; i <= 16; i++)
		if (table->dh_zones[i])
			break;

	write_lock_bh(&dn_fib_tables_lock);
	if (i > 16) {
		dz->dz_next = table->dh_zone_list;
		table->dh_zone_list = dz;
	} else {
		dz->dz_next = table->dh_zones[i]->dz_next;
		table->dh_zones[i]->dz_next = dz;
	}
	table->dh_zones[z] = dz;
	write_unlock_bh(&dn_fib_tables_lock);
	return dz;
}
static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern_rta *rta, struct dn_fib_info *fi)
{
	struct rtnexthop *nhp;
	int nhlen;

	if (rta->rta_priority && *rta->rta_priority != fi->fib_priority)
		return 1;

	if (rta->rta_oif || rta->rta_gw) {
		if ((!rta->rta_oif || *rta->rta_oif == fi->fib_nh->nh_oif) &&
		    (!rta->rta_gw || memcmp(rta->rta_gw, &fi->fib_nh->nh_gw, 2) == 0))
			return 0;
		return 1;
	}

	if (rta->rta_mp == NULL)
		return 0;

	nhp = RTA_DATA(rta->rta_mp);
	nhlen = RTA_PAYLOAD(rta->rta_mp);

	for_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		__le16 gw;

		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;
		if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
			return 1;
		if (attrlen) {
			gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);

			if (gw && gw != nh->nh_gw)
				return 1;
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);

	return 0;
}
static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(2) /* RTA_DST */
			 + nla_total_size(4); /* RTA_PRIORITY */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain a gateway attribute */
		nhsize += nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}
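
/*
 * Illustrative arithmetic for the common one-nexthop case (exact
 * numbers depend on NLA_HDRLEN and padding): a 12-byte rtmsg plus
 * three 8-byte attribute slots (table, the 2-byte destination and the
 * priority both round up), the nested metrics block, and one nested
 * rtnexthop with room for a gateway.
 */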
static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
			struct dn_fib_info *fi, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_DECnet;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = tb_id;
	RTA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_type = type;
	if (rtm->rtm_dst_len)
		RTA_PUT(skb, RTA_DST, 2, dst);
	rtm->rtm_protocol = fi->fib_protocol;
	if (fi->fib_priority)
		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto rtattr_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
		if (fi->fib_nh->nh_oif)
			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
	}
	if (fi->fib_nhs > 1) {
		struct rtnexthop *nhp;
		struct rtattr *mp_head;
		if (skb_tailroom(skb) <= RTA_SPACE(0))
			goto rtattr_failure;
		mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));

		for_nexthops(fi) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = nh->nh_flags & 0xFF;
			nhp->rtnh_hops = nh->nh_weight - 1;
			nhp->rtnh_ifindex = nh->nh_oif;
			if (nh->nh_gw)
				RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
			nhp->rtnh_len = skb->tail - (unsigned char *)nhp;
		} endfor_nexthops(fi);
		mp_head->rta_type = RTA_MULTIPATH;
		mp_head->rta_len = skb->tail - (u8*)mp_head;
	}

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -EMSGSIZE;
}
static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
{
	struct sk_buff *skb;
	u32 pid = req ? req->pid : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
			       f->fn_type, f->fn_scope, &f->fn_key, z,
			       DN_FIB_INFO(f), 0);
	if (err < 0) {
		/* -EMSGSIZE failure implies BUG in dn_fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(RTNLGRP_DECnet_ROUTE, err);
}
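
/*
 * Netlink dump continuation state, as used by the helpers below:
 * cb->args[0] holds the table hash bucket, cb->args[1] the table index
 * within that bucket, cb->args[2] the zone, cb->args[3] the hash chain
 * within the zone and cb->args[4] the node within the chain.
 */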
static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz,
				struct dn_fib_node *f)
{
	int i, s_i;

	s_i = cb->args[4];
	for(i = 0; f; i++, f = f->fn_next) {
		if (i < s_i)
			continue;
		if (f->fn_state & DN_S_ZOMBIE)
			continue;
		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq,
				RTM_NEWROUTE,
				tb->n,
				(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
				f->fn_scope, &f->fn_key, dz->dz_order,
				f->fn_info, NLM_F_MULTI) < 0) {
			cb->args[4] = i;
			return -1;
		}
	}
	cb->args[4] = i;
	return skb->len;
}
static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz)
{
	int h, s_h;

	s_h = cb->args[3];
	for(h = 0; h < dz->dz_divisor; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0]));
		if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
			continue;
		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
			cb->args[3] = h;
			return -1;
		}
	}
	cb->args[3] = h;
	return skb->len;
}
static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int m, s_m;
	struct dn_zone *dz;
	struct dn_hash *table = (struct dn_hash *)tb->data;

	s_m = cb->args[2];
	read_lock(&dn_fib_tables_lock);
	for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
			memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));

		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
			cb->args[2] = m;
			read_unlock(&dn_fib_tables_lock);
			return -1;
		}
	}
	read_unlock(&dn_fib_tables_lock);
	cb->args[2] = m;
	return skb->len;
}
int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct dn_fib_table *tb;
	struct hlist_node *node;
	int dumped = 0;

	if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
	    ((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)
		return dn_cache_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
		e = 0;
		hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (tb->dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_fib_node *new_f, *f, **fp, **del_fp;
	struct dn_zone *dz;
	struct dn_fib_info *fi;
	int z = r->rtm_dst_len;
	int type = r->rtm_type;
	dn_fib_key_t key;
	int err;

	if (z > 16)
		return -EINVAL;

	dz = table->dh_zones[z];
	if (!dz && !(dz = dn_new_zone(table, z)))
		return -ENOBUFS;

	dz_key_0(key);
	if (rta->rta_dst) {
		__le16 dst;
		memcpy(&dst, rta->rta_dst, 2);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
		return err;

	/* Grow the zone hash while still below the divisor cap */
	if (dz->dz_nent > (dz->dz_divisor << 2) &&
	    dz->dz_divisor < DN_MAX_DIVISOR &&
	    (z == 16 || (1<<z) > dz->dz_divisor))
		dn_rehash_zone(dz);
	fp = dn_chain_p(key, dz);

	DN_FIB_SCAN(f, fp) {
		if (dn_key_leq(key, f->fn_key))
			break;
	}

	del_fp = NULL;

	if (f && (f->fn_state & DN_S_ZOMBIE) &&
	    dn_key_eq(f->fn_key, key)) {
		del_fp = fp;
		fp = &f->fn_next;
		f = *fp;
		goto create;
	}

	DN_FIB_SCAN_KEY(f, fp, key) {
		if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority)
			break;
	}

	if (f && dn_key_eq(f->fn_key, key) &&
	    fi->fib_priority == DN_FIB_INFO(f)->fib_priority) {
		struct dn_fib_node **ins_fp;

		err = -EEXIST;
		if (n->nlmsg_flags & NLM_F_EXCL)
			goto out;

		if (n->nlmsg_flags & NLM_F_REPLACE) {
			del_fp = fp;
			fp = &f->fn_next;
			f = *fp;
			goto replace;
		}

		ins_fp = fp;
		err = -EEXIST;

		DN_FIB_SCAN_KEY(f, fp, key) {
			if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
				break;
			if (f->fn_type == type && f->fn_scope == r->rtm_scope
			    && DN_FIB_INFO(f) == fi)
				goto out;
		}

		if (!(n->nlmsg_flags & NLM_F_APPEND)) {
			fp = ins_fp;
			f = *fp;
		}
	}

create:
	err = -ENOENT;
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		goto out;
replace:
	err = -ENOBUFS;
	new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
	if (new_f == NULL)
		goto out;

	memset(new_f, 0, sizeof(struct dn_fib_node));

	new_f->fn_key = key;
	new_f->fn_type = type;
	new_f->fn_scope = r->rtm_scope;
	DN_FIB_INFO(new_f) = fi;

	new_f->fn_next = f;
	write_lock_bh(&dn_fib_tables_lock);
	*fp = new_f;
	write_unlock_bh(&dn_fib_tables_lock);
	dz->dz_nent++;
	if (del_fp) {
		f = *del_fp;
		/* Unlink the replaced node */
		write_lock_bh(&dn_fib_tables_lock);
		*del_fp = f->fn_next;
		write_unlock_bh(&dn_fib_tables_lock);

		if (!(f->fn_state & DN_S_ZOMBIE))
			dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
		if (f->fn_state & DN_S_ACCESSED)
			dn_rt_cache_flush(-1);
		dn_free_node(f);
		dz->dz_nent--;
	} else {
		dn_rt_cache_flush(-1);
	}

	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);

	return 0;
out:
	dn_fib_release_info(fi);
	return err;
}
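
/*
 * Insert semantics follow the usual rtnetlink conventions: NLM_F_EXCL
 * fails with -EEXIST if an equal key/priority entry exists,
 * NLM_F_REPLACE swaps the matched node out, NLM_F_APPEND adds after
 * entries of equal priority, and without NLM_F_CREATE a miss yields
 * -ENOENT.
 */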
static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_fib_node **fp, **del_fp, *f;
	int z = r->rtm_dst_len;
	struct dn_zone *dz;
	dn_fib_key_t key;
	int matched;

	if (z > 16)
		return -EINVAL;

	if ((dz = table->dh_zones[z]) == NULL)
		return -ESRCH;

	dz_key_0(key);
	if (rta->rta_dst) {
		__le16 dst;
		memcpy(&dst, rta->rta_dst, 2);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}
	fp = dn_chain_p(key, dz);

	DN_FIB_SCAN(f, fp) {
		if (dn_key_eq(f->fn_key, key))
			break;
		if (dn_key_leq(key, f->fn_key))
			return -ESRCH;
	}

	matched = 0;
	del_fp = NULL;
	DN_FIB_SCAN_KEY(f, fp, key) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (f->fn_state & DN_S_ZOMBIE)
			return -ESRCH;

		matched++;

		if (del_fp == NULL &&
		    (!r->rtm_type || f->fn_type == r->rtm_type) &&
		    (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
		    (!r->rtm_protocol ||
		     fi->fib_protocol == r->rtm_protocol) &&
		    dn_fib_nh_match(r, n, rta, fi) == 0)
			del_fp = fp;
	}
	if (del_fp) {
		f = *del_fp;
		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);

		if (matched != 1) {
			write_lock_bh(&dn_fib_tables_lock);
			*del_fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			if (f->fn_state & DN_S_ACCESSED)
				dn_rt_cache_flush(-1);
			dn_free_node(f);
			dz->dz_nent--;
		} else {
			f->fn_state |= DN_S_ZOMBIE;
			if (f->fn_state & DN_S_ACCESSED) {
				f->fn_state &= ~DN_S_ACCESSED;
				dn_rt_cache_flush(-1);
			}
			if (++dn_fib_hash_zombies > 128)
				dn_fib_flush();
		}

		return 0;
	}

	return -ESRCH;
}
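
/*
 * A deleted entry that was the only match for its key is not freed
 * immediately but marked DN_S_ZOMBIE and skipped by lookups and dumps;
 * once more than 128 zombies accumulate, dn_fib_flush() reaps them.
 */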
static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
{
	int found = 0;
	struct dn_fib_node *f;

	while((f = *fp) != NULL) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) {
			write_lock_bh(&dn_fib_tables_lock);
			*fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			dn_free_node(f);
			found++;
			continue;
		}
		fp = &f->fn_next;
	}

	return found;
}
static int dn_fib_table_flush(struct dn_fib_table *tb)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_zone *dz;
	int found = 0;

	dn_fib_hash_zombies = 0;
	for(dz = table->dh_zone_list; dz; dz = dz->dz_next) {
		int i;
		int tmp = 0;
		for(i = dz->dz_divisor-1; i >= 0; i--)
			tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table);
		dz->dz_nent -= tmp;
		found += tmp;
	}

	return found;
}
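
/*
 * Longest-prefix match falls out of the zone list order: dh_zone_list
 * is kept sorted from the most specific zone (/16) down to the default
 * (/0), so the first semantic match found below wins.
 */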
static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res)
{
	int err;
	struct dn_zone *dz;
	struct dn_hash *t = (struct dn_hash *)tb->data;

	read_lock(&dn_fib_tables_lock);
	for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
		struct dn_fib_node *f;
		dn_fib_key_t k = dz_key(flp->fld_dst, dz);

		for(f = dz_chain(k, dz); f; f = f->fn_next) {
			if (!dn_key_eq(k, f->fn_key)) {
				if (dn_key_leq(k, f->fn_key))
					break;
				else
					continue;
			}

			f->fn_state |= DN_S_ACCESSED;

			if (f->fn_state&DN_S_ZOMBIE)
				continue;

			if (f->fn_scope < flp->fld_scope)
				continue;

			err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);
			if (err == 0) {
				res->type = f->fn_type;
				res->scope = f->fn_scope;
				res->prefixlen = dz->dz_order;
				goto out;
			}
			if (err < 0)
				goto out;
		}
	}
	err = 1;
out:
	read_unlock(&dn_fib_tables_lock);
	return err;
}
struct dn_fib_table *dn_fib_get_table(u32 n, int create)
{
	struct dn_fib_table *t;
	struct hlist_node *node;
	unsigned int h;

	if (n < RT_TABLE_MIN)
		return NULL;

	if (n > RT_TABLE_MAX)
		return NULL;

	h = n & (DN_FIB_TABLE_HASHSZ - 1);
	rcu_read_lock();
	hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) {
		if (t->n == n) {
			rcu_read_unlock();
			return t;
		}
	}
	rcu_read_unlock();

	if (!create)
		return NULL;

	if (in_interrupt() && net_ratelimit()) {
		printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
		return NULL;
	}

	t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
		    GFP_KERNEL);
	if (t == NULL)
		return NULL;

	t->n = n;
	t->insert = dn_fib_table_insert;
	t->delete = dn_fib_table_delete;
	t->lookup = dn_fib_table_lookup;
	t->flush = dn_fib_table_flush;
	t->dump = dn_fib_table_dump;
	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);

	return t;
}
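
/*
 * Table lookup is RCU protected; creation publishes the new table only
 * via hlist_add_head_rcu(), so concurrent creators are presumably
 * serialised by the RTNL mutex in the rtnetlink paths that call this
 * (an assumption, not enforced here).
 */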
struct dn_fib_table *dn_fib_empty_table(void)
{
	u32 id;

	for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
		if (dn_fib_get_table(id, 0) == NULL)
			return dn_fib_get_table(id, 1);
	return NULL;
}
void dn_fib_flush(void)
{
	int flushed = 0;
	struct dn_fib_table *tb;
	struct hlist_node *node;
	unsigned int h;

	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
			flushed += tb->flush(tb);
	}

	if (flushed)
		dn_rt_cache_flush(-1);
}
void __init dn_fib_table_init(void)
{
	dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
					sizeof(struct dn_fib_info),
					0, SLAB_HWCACHE_ALIGN,
					NULL);
}
void __exit dn_fib_table_cleanup(void)
{
	struct dn_fib_table *t;
	struct hlist_node *node, *next;
	unsigned int h;

	write_lock(&dn_fib_tables_lock);
	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
					  hlist) {
			hlist_del(&t->hlist);
			kfree(t);
		}
	}
	write_unlock(&dn_fib_tables_lock);
}