/*
 * net/sched/cls_route.c        ROUTE4 classifier.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
   1. For now we assume that route tags < 256.
      This allows direct table lookups instead of hash tables.
   2. For now we assume that "from TAG" and "fromdev DEV" statements
      are mutually exclusive.
   3. "to TAG from ANY" has higher priority than "to ANY from XXX"
 */

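/*
 * Filter handles pack the whole match into one u32 (see route4_set_parms):
 *   bits  0..7   destination tag ("to"), meaningful when bit 15 is clear
 *   bits  8..14  copied from the user-supplied handle
 *   bit   15     set when no "to" tag was given
 *   bits 16..31  source selector: the "from" tag, the interface id with
 *                bit 31 set, or 0xFFFF for a wildcard source
 */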
struct route4_fastmap
{
        struct route4_filter    *filter;
        u32                     id;
        int                     iif;
};

struct route4_head
{
        struct route4_fastmap   fastmap[16];
        struct route4_bucket    *table[256+1];
};

struct route4_bucket
{
        /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
        struct route4_filter    *ht[16+16+1];
};

struct route4_filter
{
        struct route4_filter    *next;
        u32                     id;
        int                     iif;

        struct tcf_result       res;
        struct tcf_exts         exts;
        u32                     handle;
        struct route4_bucket    *bkt;
};

#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static const struct tcf_ext_map route_ext_map = {
        .police = TCA_ROUTE4_POLICE,
        .action = TCA_ROUTE4_ACT
};

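/*
 * The fastmap is a small direct-mapped cache of recent classifications,
 * indexed by the low four bits of the route tag.  It is wiped under the
 * qdisc tree lock whenever the filter set changes.
 */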
static inline int route4_fastmap_hash(u32 id, int iif)
{
        return id&0xF;
}

static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
        qdisc_lock_tree(dev);
        memset(head->fastmap, 0, sizeof(head->fastmap));
        qdisc_unlock_tree(dev);
}

static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
                   struct route4_filter *f)
{
        int h = route4_fastmap_hash(id, iif);
        head->fastmap[h].id = id;
        head->fastmap[h].iif = iif;
        head->fastmap[h].filter = f;
}

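/*
 * Per-packet hashes: the low 8 bits of the route tag select one of the
 * 256 destination buckets, while the source tag, the incoming interface
 * and the wildcard map onto slots 0-15, 16-31 and 32 of that bucket.
 */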
static inline int route4_hash_to(u32 id)
{
        return id&0xFF;
}

static inline int route4_hash_from(u32 id)
{
        return (id>>16)&0xF;
}

static inline int route4_hash_iif(int iif)
{
        return 16 + ((iif>>16)&0xF);
}

static inline int route4_hash_wild(void)
{
        return 32;
}

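/*
 * Copy the match result, run any attached actions, and cache a plain
 * positive verdict in the fastmap.  A negative action result skips this
 * filter and disables caching for the rest of the lookup.
 */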
#define ROUTE4_APPLY_RESULT()                                   \
{                                                               \
        *res = f->res;                                          \
        if (tcf_exts_is_available(&f->exts)) {                  \
                int r = tcf_exts_exec(skb, &f->exts, res);      \
                if (r < 0) {                                    \
                        dont_cache = 1;                         \
                        continue;                               \
                }                                               \
                return r;                                       \
        } else if (!dont_cache)                                 \
                route4_set_fastmap(head, id, iif, f);           \
        return 0;                                               \
}

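/*
 * Classify by the route tag (dst->tclassid): try the fastmap cache, then
 * the bucket selected by the destination tag (source slot, interface
 * slot, wildcard slot), then the wildcard destination bucket at index
 * 256.  Misses are cached as ROUTE4_FAILURE.  When no filters have been
 * configured, the old_method path interprets the tag itself as a class
 * handle.
 */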
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
                           struct tcf_result *res)
{
        struct route4_head *head = (struct route4_head*)tp->root;
        struct dst_entry *dst;
        struct route4_bucket *b;
        struct route4_filter *f;
        u32 id, h;
        int iif, dont_cache = 0;

        if ((dst = skb->dst) == NULL)
                goto failure;

        id = dst->tclassid;
        if (head == NULL)
                goto old_method;

        iif = ((struct rtable*)dst)->fl.iif;

        h = route4_fastmap_hash(id, iif);
        if (id == head->fastmap[h].id &&
            iif == head->fastmap[h].iif &&
            (f = head->fastmap[h].filter) != NULL) {
                if (f == ROUTE4_FAILURE)
                        goto failure;

                *res = f->res;
                return 0;
        }

        h = route4_hash_to(id);

restart:
        if ((b = head->table[h]) != NULL) {
                for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
                        if (f->id == id)
                                ROUTE4_APPLY_RESULT();

                for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
                        if (f->iif == iif)
                                ROUTE4_APPLY_RESULT();

                for (f = b->ht[route4_hash_wild()]; f; f = f->next)
                        ROUTE4_APPLY_RESULT();

        }
        if (h < 256) {
                h = 256;
                id &= ~0xFFFF;
                goto restart;
        }

        if (!dont_cache)
                route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
        return -1;

old_method:
        if (id && (TC_H_MAJ(id) == 0 ||
                   !(TC_H_MAJ(id^tp->q->handle)))) {
                res->classid = id;
                res->class = 0;
                return 0;
        }
        return -1;
}

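/*
 * to_hash()/from_hash() map a filter handle (not a packet's route tag)
 * onto table and bucket indices; out-of-range values fall outside the
 * 0..256 and 0..32 bounds that the callers check.
 */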
static inline u32 to_hash(u32 id)
{
        u32 h = id&0xFF;
        if (id&0x8000)
                h += 256;
        return h;
}

static inline u32 from_hash(u32 id)
{
        id &= 0xFFFF;
        if (id == 0xFFFF)
                return 32;
        if (!(id & 0x8000)) {
                if (id > 255)
                        return 256;
                return id&0xF;
        }
        return 16 + (id&0xF);
}

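/* Resolve a handle to its filter for the netlink get/change operations. */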
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
        struct route4_head *head = (struct route4_head*)tp->root;
        struct route4_bucket *b;
        struct route4_filter *f;
        unsigned h1, h2;

        if (!head)
                return 0;

        h1 = to_hash(handle);
        if (h1 > 256)
                return 0;

        h2 = from_hash(handle>>16);
        if (h2 > 32)
                return 0;

        if ((b = head->table[h1]) != NULL) {
                for (f = b->ht[h2]; f; f = f->next)
                        if (f->handle == handle)
                                return (unsigned long)f;
        }
        return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
        return 0;
}

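/*
 * route4_delete_filter() unbinds the class and releases the filter's
 * extensions; route4_destroy() detaches the root and frees every bucket
 * and every filter hanging off it.
 */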
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
        tcf_unbind_filter(tp, &f->res);
        tcf_exts_destroy(tp, &f->exts);
        kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
        struct route4_head *head = xchg(&tp->root, NULL);
        int h1, h2;

        if (head == NULL)
                return;

        for (h1=0; h1<=256; h1++) {
                struct route4_bucket *b;

                if ((b = head->table[h1]) != NULL) {
                        for (h2=0; h2<=32; h2++) {
                                struct route4_filter *f;

                                while ((f = b->ht[h2]) != NULL) {
                                        b->ht[h2] = f->next;
                                        route4_delete_filter(tp, f);
                                }
                        }
                        kfree(b);
                }
        }
        kfree(head);
}

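/*
 * Unlink one filter from its bucket, flush the fastmap, and free the
 * bucket (clearing its table slot) once the last filter in it is gone.
 */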
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct route4_head *head = (struct route4_head*)tp->root;
        struct route4_filter **fp, *f = (struct route4_filter*)arg;
        unsigned h = 0;
        struct route4_bucket *b;
        int i;

        if (!head || !f)
                return -EINVAL;

        h = f->handle;
        b = f->bkt;

        for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
                if (*fp == f) {
                        tcf_tree_lock(tp);
                        *fp = f->next;
                        tcf_tree_unlock(tp);

                        route4_reset_fastmap(tp->q->dev, head, f->id);
                        route4_delete_filter(tp, f);

                        /* Strip tree */

                        for (i=0; i<=32; i++)
                                if (b->ht[i])
                                        return 0;

                        /* OK, session has no flows */
                        tcf_tree_lock(tp);
                        head->table[to_hash(h)] = NULL;
                        tcf_tree_unlock(tp);

                        kfree(b);
                        return 0;
                }
        }
        return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
        [TCA_ROUTE4_CLASSID]    = { .type = NLA_U32 },
        [TCA_ROUTE4_TO]         = { .type = NLA_U32 },
        [TCA_ROUTE4_FROM]       = { .type = NLA_U32 },
        [TCA_ROUTE4_IIF]        = { .type = NLA_U32 },
};

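/*
 * Validate the netlink attributes, derive the new handle from the
 * TO/FROM/IIF attributes, allocate the destination bucket on demand,
 * reject duplicate handles, and commit the new parameters under the
 * tree lock.
 */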
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
        struct route4_filter *f, u32 handle, struct route4_head *head,
        struct nlattr **tb, struct nlattr *est, int new)
{
        int err;
        u32 id = 0, to = 0, nhandle = 0x8000;
        struct route4_filter *fp;
        unsigned int h1;
        struct route4_bucket *b;
        struct tcf_exts e;

        err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_ROUTE4_TO]) {
                if (new && handle & 0x8000)
                        goto errout;
                to = nla_get_u32(tb[TCA_ROUTE4_TO]);
                if (to > 0xFF)
                        goto errout;
                nhandle = to;
        }

        if (tb[TCA_ROUTE4_FROM]) {
                if (tb[TCA_ROUTE4_IIF])
                        goto errout;
                id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
                if (id > 0xFF)
                        goto errout;
                nhandle |= id << 16;
        } else if (tb[TCA_ROUTE4_IIF]) {
                id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
                if (id > 0x7FFF)
                        goto errout;
                nhandle |= (id | 0x8000) << 16;
        } else
                nhandle |= 0xFFFF << 16;

        if (handle && new) {
                nhandle |= handle & 0x7F00;
                if (nhandle != handle)
                        goto errout;
        }

        h1 = to_hash(nhandle);
        if ((b = head->table[h1]) == NULL) {
                err = -ENOBUFS;
                b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
                if (b == NULL)
                        goto errout;

                tcf_tree_lock(tp);
                head->table[h1] = b;
                tcf_tree_unlock(tp);
        } else {
                unsigned int h2 = from_hash(nhandle >> 16);
                err = -EEXIST;
                for (fp = b->ht[h2]; fp; fp = fp->next)
                        if (fp->handle == f->handle)
                                goto errout;
        }

        tcf_tree_lock(tp);
        if (tb[TCA_ROUTE4_TO])
                f->id = to;

        if (tb[TCA_ROUTE4_FROM])
                f->id = to | id<<16;
        else if (tb[TCA_ROUTE4_IIF])
                f->iif = id;

        f->handle = nhandle;
        f->bkt = b;
        tcf_tree_unlock(tp);

        if (tb[TCA_ROUTE4_CLASSID]) {
                f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
                tcf_bind_filter(tp, &f->res, base);
        }

        tcf_exts_change(tp, &f->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

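/*
 * Create a new filter or update an existing one.  Filters are kept in
 * ascending handle order within their bucket; if an update changed the
 * handle the filter is unlinked from its old slot, and the fastmap is
 * flushed afterwards.
 */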
static int route4_change(struct tcf_proto *tp, unsigned long base,
                       u32 handle,
                       struct nlattr **tca,
                       unsigned long *arg)
{
        struct route4_head *head = tp->root;
        struct route4_filter *f, *f1, **fp;
        struct route4_bucket *b;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_ROUTE4_MAX + 1];
        unsigned int h, th;
        u32 old_handle = 0;
        int err;

        if (opt == NULL)
                return handle ? -EINVAL : 0;

        err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
        if (err < 0)
                return err;

        if ((f = (struct route4_filter*)*arg) != NULL) {
                if (f->handle != handle && handle)
                        return -EINVAL;

                if (f->bkt)
                        old_handle = f->handle;

                err = route4_set_parms(tp, base, f, handle, head, tb,
                        tca[TCA_RATE], 0);
                if (err < 0)
                        return err;

                goto reinsert;
        }

        err = -ENOBUFS;
        if (head == NULL) {
                head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
                if (head == NULL)
                        goto errout;

                tcf_tree_lock(tp);
                tp->root = head;
                tcf_tree_unlock(tp);
        }

        f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
        if (f == NULL)
                goto errout;

        err = route4_set_parms(tp, base, f, handle, head, tb,
                tca[TCA_RATE], 1);
        if (err < 0)
                goto errout;

reinsert:
        h = from_hash(f->handle >> 16);
        for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
                if (f->handle < f1->handle)
                        break;

        f->next = f1;
        tcf_tree_lock(tp);
        *fp = f;

        if (old_handle && f->handle != old_handle) {
                th = to_hash(old_handle);
                h = from_hash(old_handle >> 16);
                if ((b = head->table[th]) != NULL) {
                        for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
                                if (*fp == f) {
                                        *fp = f->next;
                                        break;
                                }
                        }
                }
        }
        tcf_tree_unlock(tp);

        route4_reset_fastmap(tp->q->dev, head, f->id);
        *arg = (unsigned long)f;
        return 0;

errout:
        kfree(f);
        return err;
}

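/* Visit every filter in every bucket, honouring the walker's skip count. */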
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct route4_head *head = tp->root;
        unsigned h, h1;

        if (head == NULL)
                arg->stop = 1;

        if (arg->stop)
                return;

        for (h = 0; h <= 256; h++) {
                struct route4_bucket *b = head->table[h];

                if (b) {
                        for (h1 = 0; h1 <= 32; h1++) {
                                struct route4_filter *f;

                                for (f = b->ht[h1]; f; f = f->next) {
                                        if (arg->count < arg->skip) {
                                                arg->count++;
                                                continue;
                                        }
                                        if (arg->fn(tp, (unsigned long)f, arg) < 0) {
                                                arg->stop = 1;
                                                return;
                                        }
                                        arg->count++;
                                }
                        }
                }
        }
}

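/*
 * Dump one filter as nested netlink attributes: TO and either FROM or
 * IIF as encoded in the handle, plus the class id and any extensions.
 */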
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
                       struct sk_buff *skb, struct tcmsg *t)
{
        struct route4_filter *f = (struct route4_filter*)fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        u32 id;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (!(f->handle&0x8000)) {
                id = f->id&0xFF;
                NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
        }
        if (f->handle&0x80000000) {
                if ((f->handle>>16) != 0xFFFF)
                        NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
        } else {
                id = f->id>>16;
                NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
        }
        if (f->res.classid)
                NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

        if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
        .kind           =       "route",
        .classify       =       route4_classify,
        .init           =       route4_init,
        .destroy        =       route4_destroy,
        .get            =       route4_get,
        .put            =       route4_put,
        .change         =       route4_change,
        .delete         =       route4_delete,
        .walk           =       route4_walk,
        .dump           =       route4_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_route4(void)
{
        return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
        unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");