/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))	/* negative-cache sentinel stored in the fastmap */

static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static void
route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	/* The fastmap is also written from the classify path, so flush
	 * it under the device queue lock. */
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}
static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}
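/*
 * Filter handles pack the match into 32 bits:
 *
 *	bits  0..7	destination realm ("to")
 *	bit  15		set when no "to" realm was given
 *	bits 16..31	source realm ("from"), or (iif | 0x8000) for
 *			"fromdev"; 0xFFFF means "from ANY"
 *
 * The route4_hash_*() helpers above map these fields onto the 257-entry
 * destination table and the 33 per-bucket chains.
 */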
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
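/*
 * Lookup order inside a bucket follows note 3 above: exact "from"
 * matches first, then "fromdev" matches, then wildcards.  A negative
 * verdict from an attached action makes ROUTE4_APPLY_RESULT() skip the
 * filter and suppress fastmap caching for this packet.
 */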
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;
	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}
	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
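/*
 * Configuration-path hashing.  Unlike the route4_hash_*() helpers used
 * on the fast path, to_hash() and from_hash() work on filter handles
 * and also fold the "no TO realm" flag (0x8000) into the table index.
 */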
static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}
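/*
 * Worked example: "route to 1 from 2" yields handle 0x00020001;
 * to_hash() selects head->table[1] and from_hash() selects ht[2]
 * within that bucket.  "route to 1" alone yields 0xFFFF0001 and
 * lands on the wildcard chain ht[32].
 */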
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */

			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;
		memset(b, 0, sizeof(*b));

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
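/*
 * change() handles both new inserts and in-place updates: the head and
 * bucket are allocated lazily on first use, route4_set_parms() computes
 * the handle, and the filter is then (re)linked on its chain in handle
 * order.
 */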
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;
		memset(head, 0, sizeof(struct route4_head));

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;
	memset(f, 0, sizeof(*f));

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	if (f)
		kfree(f);
	return err;
}
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	rta->rta_len = skb->tail - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");