2 * net/sched/cls_route.c ROUTE4 classifier.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/skbuff.h>
19 #include <net/route.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
25 1. For now we assume that route tags < 256.
26 It allows to use direct table lookups, instead of hash tables.
27 2. For now we assume that "from TAG" and "fromdev DEV" statements
28 are mutually exclusive.
29 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
/*
 * Data structures (struct bodies partially elided in this excerpt).
 * NOTE(review): the member lines below belong to several structs:
 * route4_fastmap (one cached lookup entry), route4_head (per-tp root),
 * route4_bucket (per-TO-hash chain array) and route4_filter (one rule).
 */
34 struct route4_filter *filter;
41 struct route4_fastmap fastmap[16];
42 struct route4_bucket *table[256+1];
47 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
48 struct route4_filter *ht[16+16+1];
53 struct route4_filter *next;
57 struct tcf_result res;
60 struct route4_bucket *bkt;
/* Sentinel stored in the fastmap to cache a known lookup failure. */
63 #define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
/* Maps the generic action/police extensions onto ROUTE4 netlink attributes. */
65 static struct tcf_ext_map route_ext_map = {
66 .police = TCA_ROUTE4_POLICE,
67 .action = TCA_ROUTE4_ACT
/* Hash a (route id, input interface) pair into the 16-entry fastmap.
 * Body elided in this excerpt. */
70 static __inline__ int route4_fastmap_hash(u32 id, int iif)
/* Invalidate the whole fastmap cache; called whenever a filter with the
 * given id is changed or deleted.  NOTE(review): the matching
 * qdisc_lock_tree(dev) call is elided in this excerpt — the memset is
 * presumably executed under the qdisc tree lock. */
76 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
79 memset(head->fastmap, 0, sizeof(head->fastmap));
80 qdisc_unlock_tree(dev);
/* Cache one lookup result (or ROUTE4_FAILURE for a known miss) so the next
 * packet with the same (route id, iif) pair can skip the hash-chain walk. */
84 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
85 struct route4_filter *f)
87 int h = route4_fastmap_hash(id, iif);
88 head->fastmap[h].id = id;
89 head->fastmap[h].iif = iif;
90 head->fastmap[h].filter = f;
/* Packet-path hash helpers (bodies mostly elided in this excerpt):
 * _to indexes head->table[], while _from/_iif/_wild index a bucket's
 * ht[] (16 FROM slots, 16 IIF slots, one wildcard slot = 16+16+1). */
93 static __inline__ int route4_hash_to(u32 id)
98 static __inline__ int route4_hash_from(u32 id)
103 static __inline__ int route4_hash_iif(int iif)
105 return 16 + ((iif>>16)&0xF);
108 static __inline__ int route4_hash_wild(void)
/* Shared tail of the classify loops: run the filter's extensions (actions/
 * police) and, when there is no extension verdict pending and caching is
 * allowed, record the hit in the fastmap.  Several continuation lines of
 * the macro are elided in this excerpt. */
113 #define ROUTE4_APPLY_RESULT() \
116 if (tcf_exts_is_available(&f->exts)) { \
117 int r = tcf_exts_exec(skb, &f->exts, res); \
123 } else if (!dont_cache) \
124 route4_set_fastmap(head, id, iif, f); \
/*
 * Classify a packet by its routing decision (skb->dst): try the fastmap
 * cache first, then walk the TO-hash bucket's FROM, IIF and wildcard
 * chains in that order ("to X from Y" beats "to X fromif Z" beats plain
 * "to X" — see the priority note in the file header).  Several lines are
 * elided in this excerpt (extraction of `id`, the cache-hit return, the
 * miss fallbacks).
 */
128 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
129 struct tcf_result *res)
131 struct route4_head *head = (struct route4_head*)tp->root;
132 struct dst_entry *dst;
133 struct route4_bucket *b;
134 struct route4_filter *f;
136 int iif, dont_cache = 0;
/* No route attached to the skb: nothing to classify on. */
138 if ((dst = skb->dst) == NULL)
145 iif = ((struct rtable*)dst)->fl.iif;
/* Fast path: per-head cache of the last lookup for this (id, iif) hash. */
147 h = route4_fastmap_hash(id, iif);
148 if (id == head->fastmap[h].id &&
149 iif == head->fastmap[h].iif &&
150 (f = head->fastmap[h].filter) != NULL) {
151 if (f == ROUTE4_FAILURE)
/* Slow path: walk the bucket selected by the TO hash. */
158 h = route4_hash_to(id);
161 if ((b = head->table[h]) != NULL) {
162 for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
164 ROUTE4_APPLY_RESULT();
166 for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
168 ROUTE4_APPLY_RESULT();
170 for (f = b->ht[route4_hash_wild()]; f; f = f->next)
171 ROUTE4_APPLY_RESULT();
/* Total miss: cache the failure so repeat packets bail out quickly. */
181 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
/* Legacy fallback: accept a route id that looks like a class handle of
 * this qdisc.  NOTE(review): surrounding label/return lines elided. */
186 if (id && (TC_H_MAJ(id) == 0 ||
187 !(TC_H_MAJ(id^tp->q->handle)))) {
/* Control-path hash helpers for 32-bit filter handles:
 * to_hash() picks the head->table[] slot from the low handle bits (body
 * elided); from_hash() maps the high 16 handle bits to a bucket ht[]
 * slot — the visible return handles the IIF form (bit 0x8000 set),
 * landing in slots 16..31.  Other branches elided in this excerpt. */
195 static inline u32 to_hash(u32 id)
203 static inline u32 from_hash(u32 id)
208 if (!(id & 0x8000)) {
213 return 16 + (id&0xF);
/*
 * ->get(): look up a filter by handle for the netlink control path.
 * Splits the handle into a TO bucket index (h1) and a FROM/IIF chain
 * index (h2), then scans that chain for an exact handle match.  The
 * range checks on h1/h2 and the not-found return are elided in this
 * excerpt.  Returns the filter pointer cast to unsigned long.
 */
216 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
218 struct route4_head *head = (struct route4_head*)tp->root;
219 struct route4_bucket *b;
220 struct route4_filter *f;
226 h1 = to_hash(handle);
230 h2 = from_hash(handle>>16);
234 if ((b = head->table[h1]) != NULL) {
235 for (f = b->ht[h2]; f; f = f->next)
236 if (f->handle == handle)
237 return (unsigned long)f;
/* ->put(): filters are not reference counted here, so nothing to release. */
242 static void route4_put(struct tcf_proto *tp, unsigned long f)
/* ->init(): no per-tp setup; the head is allocated lazily in ->change(). */
246 static int route4_init(struct tcf_proto *tp)
/* Tear down one filter: unbind its class and destroy its extensions.
 * NOTE(review): the final kfree(f) is elided in this excerpt — callers
 * appear to rely on this helper to free the filter itself; confirm. */
252 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
254 tcf_unbind_filter(tp, &f->res);
255 tcf_exts_destroy(tp, &f->exts);
/*
 * ->destroy(): detach the root atomically with xchg(), then free every
 * filter in every chain of every bucket.  Iterates all 257 TO slots and
 * all 33 chain slots per bucket; the NULL-head check, chain unlink and
 * kfree() of buckets/head are elided in this excerpt.
 */
259 static void route4_destroy(struct tcf_proto *tp)
261 struct route4_head *head = xchg(&tp->root, NULL);
267 for (h1=0; h1<=256; h1++) {
268 struct route4_bucket *b;
270 if ((b = head->table[h1]) != NULL) {
271 for (h2=0; h2<=32; h2++) {
272 struct route4_filter *f;
274 while ((f = b->ht[h2]) != NULL) {
276 route4_delete_filter(tp, f);
/*
 * ->delete(): unlink one filter from its chain, invalidate the fastmap,
 * destroy the filter, then garbage-collect the bucket when all 33 of its
 * chains are empty.  The unlink assignment, locking and kfree lines are
 * elided in this excerpt.
 */
285 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
287 struct route4_head *head = (struct route4_head*)tp->root;
288 struct route4_filter **fp, *f = (struct route4_filter*)arg;
290 struct route4_bucket *b;
/* Walk the chain the handle hashes to, looking for f itself. */
299 for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
/* Drop any cached lookup that may still point at the dying filter. */
305 route4_reset_fastmap(tp->q->dev, head, f->id);
306 route4_delete_filter(tp, f);
/* If no chain in the bucket still has entries, free the bucket too. */
310 for (i=0; i<=32; i++)
314 /* OK, session has no flows */
316 head->table[to_hash(h)] = NULL;
/* Netlink attribute policy: all four ROUTE4 parameters are plain u32. */
326 static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
327 [TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
328 [TCA_ROUTE4_TO] = { .type = NLA_U32 },
329 [TCA_ROUTE4_FROM] = { .type = NLA_U32 },
330 [TCA_ROUTE4_IIF] = { .type = NLA_U32 },
/*
 * Validate netlink attributes and apply them to filter f.  Rebuilds the
 * 32-bit handle (nhandle) from TO/FROM/IIF, rejects a caller-supplied
 * handle that disagrees with the attributes, allocates the TO bucket on
 * demand and refuses duplicate handles on insert.  Error returns, range
 * checks and several commit assignments are elided in this excerpt;
 * `e` is the validated-extensions holder (declaration elided).
 */
333 static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
334 struct route4_filter *f, u32 handle, struct route4_head *head,
335 struct nlattr **tb, struct nlattr *est, int new)
338 u32 id = 0, to = 0, nhandle = 0x8000;
339 struct route4_filter *fp;
341 struct route4_bucket *b;
/* Validate actions/police before mutating the filter. */
344 err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
349 if (tb[TCA_ROUTE4_TO]) {
/* Handle bit 0x8000 set is inconsistent with an explicit TO attribute. */
350 if (new && handle & 0x8000)
352 to = nla_get_u32(tb[TCA_ROUTE4_TO]);
358 if (tb[TCA_ROUTE4_FROM]) {
/* "from TAG" and "fromdev DEV" are mutually exclusive (see file header). */
359 if (tb[TCA_ROUTE4_IIF])
361 id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
365 } else if (tb[TCA_ROUTE4_IIF]) {
366 id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
/* IIF form: high half of the handle carries the iif id with 0x8000 set. */
369 nhandle |= (id | 0x8000) << 16;
/* Neither FROM nor IIF: wildcard 0xFFFF in the high half. */
371 nhandle |= 0xFFFF << 16;
374 nhandle |= handle & 0x7F00;
/* Caller's handle must match what the attributes imply. */
375 if (nhandle != handle)
/* Find or create the TO bucket for the (re)computed handle. */
379 h1 = to_hash(nhandle);
380 if ((b = head->table[h1]) == NULL) {
382 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
/* Reject a second filter with the same handle in the target chain. */
390 unsigned int h2 = from_hash(nhandle >> 16);
392 for (fp = b->ht[h2]; fp; fp = fp->next)
393 if (fp->handle == f->handle)
/* Commit TO/FROM/IIF to the filter (assignment lines elided). */
398 if (tb[TCA_ROUTE4_TO])
401 if (tb[TCA_ROUTE4_FROM])
403 else if (tb[TCA_ROUTE4_IIF])
410 if (tb[TCA_ROUTE4_CLASSID]) {
411 f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
412 tcf_bind_filter(tp, &f->res, base);
415 tcf_exts_change(tp, &f->exts, &e);
/* Error path: release the validated extensions. */
419 tcf_exts_destroy(tp, &e);
/*
 * ->change(): create or update a filter from netlink attributes.
 * Updates in place when *arg names an existing filter; otherwise
 * allocates the head lazily (first filter on this tp) and a new filter,
 * inserts it into its bucket chain in ascending-handle order, and — on
 * a handle change during update — unlinks the stale entry found via the
 * old handle's hashes.  Locking, error paths, some parameters of the
 * signature and several statements are elided in this excerpt.
 */
423 static int route4_change(struct tcf_proto *tp, unsigned long base,
428 struct route4_head *head = tp->root;
429 struct route4_filter *f, *f1, **fp;
430 struct route4_bucket *b;
431 struct nlattr *opt = tca[TCA_OPTIONS];
432 struct nlattr *tb[TCA_ROUTE4_MAX + 1];
/* No options: only acceptable when no specific handle is addressed. */
438 return handle ? -EINVAL : 0;
440 err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
/* Update path: *arg points at an existing filter. */
444 if ((f = (struct route4_filter*)*arg) != NULL) {
445 if (f->handle != handle && handle)
449 old_handle = f->handle;
451 err = route4_set_parms(tp, base, f, handle, head, tb,
/* Create path: head on first insert, then the filter itself. */
461 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
470 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
474 err = route4_set_parms(tp, base, f, handle, head, tb,
/* Insert into the bucket chain, keeping it sorted by handle. */
480 h = from_hash(f->handle >> 16);
481 for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
482 if (f->handle < f1->handle)
/* Handle changed on update: remove the stale link under the old hashes. */
489 if (old_handle && f->handle != old_handle) {
490 th = to_hash(old_handle);
491 h = from_hash(old_handle >> 16);
492 if ((b = head->table[th]) != NULL) {
493 for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
/* Any cached lookup for this route id is now stale. */
503 route4_reset_fastmap(tp->q->dev, head, f->id);
504 *arg = (unsigned long)f;
/*
 * ->walk(): iterate every filter in every chain of every bucket,
 * honouring arg->skip / arg->count, and stop early when arg->fn returns
 * a negative value (the arg->stop assignment and count bookkeeping are
 * elided in this excerpt).
 */
512 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
514 struct route4_head *head = tp->root;
523 for (h = 0; h <= 256; h++) {
524 struct route4_bucket *b = head->table[h];
527 for (h1 = 0; h1 <= 32; h1++) {
528 struct route4_filter *f;
530 for (f = b->ht[h1]; f; f = f->next) {
531 if (arg->count < arg->skip) {
535 if (arg->fn(tp, (unsigned long)f, arg) < 0) {
/*
 * ->dump(): encode one filter back to a netlink message.  Decodes the
 * TO/FROM/IIF attributes from the handle bits (0x8000 clear => a TO id
 * is present; 0x80000000 set => the IIF form, with 0xFFFF in the high
 * half meaning "no FROM/IIF to dump"), then dumps classid, extensions
 * and extension stats.  The nla_put_failure trim path and some `id`
 * derivation lines are elided in this excerpt.
 */
546 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
547 struct sk_buff *skb, struct tcmsg *t)
549 struct route4_filter *f = (struct route4_filter*)fh;
550 unsigned char *b = skb_tail_pointer(skb);
557 t->tcm_handle = f->handle;
559 nest = nla_nest_start(skb, TCA_OPTIONS);
561 goto nla_put_failure;
563 if (!(f->handle&0x8000)) {
565 NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
567 if (f->handle&0x80000000) {
568 if ((f->handle>>16) != 0xFFFF)
569 NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
572 NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
575 NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
577 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
578 goto nla_put_failure;
580 nla_nest_end(skb, nest);
582 if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
583 goto nla_put_failure;
/* Classifier registration table for the tc core.  NOTE(review): the
 * .kind, .init, .get, .put, .walk and .dump entries are elided in this
 * excerpt. */
592 static struct tcf_proto_ops cls_route4_ops __read_mostly = {
594 .classify = route4_classify,
596 .destroy = route4_destroy,
599 .change = route4_change,
600 .delete = route4_delete,
603 .owner = THIS_MODULE,
/* Module entry/exit: register / unregister the classifier with the
 * traffic-control core. */
606 static int __init init_route4(void)
608 return register_tcf_proto_ops(&cls_route4_ops);
611 static void __exit exit_route4(void)
613 unregister_tcf_proto_ops(&cls_route4_ops);
616 module_init(init_route4)
617 module_exit(exit_route4)
618 MODULE_LICENSE("GPL");