/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
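/*
 * Editor's illustration (not from the original file): a filter of this
 * kind would typically be installed from user space with something like
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route from 10 to 2 classid 1:2
 *
 * where the realm numbers 10 and 2 are the route tags assigned via the
 * routing table (e.g. "ip route add ... realm 2").
 */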
struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
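/* The extension map above tells the generic tcf_exts code which netlink
 * attributes carry this classifier's actions and policer configuration. */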
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}
static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
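/*
 * Note (editor's summary): the fastmap is a 16-entry, direct-mapped
 * cache of recent lookup results keyed on (id, iif);
 * route4_reset_fastmap() flushes all of it under dev->queue_lock
 * whenever the filter set changes.
 */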
static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}
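/*
 * Handle layout, as implied by the hash helpers here and by
 * route4_set_parms() below (editor's summary):
 *
 *	bits  0..15: destination tag ("to"), with 0x8000 meaning "to ANY"
 *	bits 16..31: source tag ("from"), iif|0x8000 for "fromdev", or
 *		     0xFFFF when neither "from" nor "fromdev" is given
 */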
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
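/*
 * ROUTE4_APPLY_RESULT() runs inside route4_classify()'s lookup loops:
 * a filter with extensions defers to tcf_exts_exec() (skipping to the
 * next filter on a negative verdict), while a plain match is cached in
 * the fastmap unless caching was vetoed earlier in the lookup.
 */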
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* No filters installed: fall back to interpreting the route
	 * tag directly as a class ID on this qdisc. */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;

	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}
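/*
 * Worked example (editor's note): "route to 1 from 2" yields handle
 * 0x00020001; to_hash() then selects head->table[1] and
 * from_hash(0x0002) selects chain ht[2] within that bucket.
 */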
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip the bucket if it is now empty */
			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;
		memset(b, 0, sizeof(*b));

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;
		memset(head, 0, sizeof(struct route4_head));

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;
	memset(f, 0, sizeof(*f));

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Keep each chain sorted by handle */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		/* Handle changed: unlink the filter from its old chain */
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	if (f)
		kfree(f);
	return err;
}
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	rta->rta_len = skb->tail - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");