/*
 * net/sched/police.c	Input police filter.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/netlink.h>
#define L2T(p,L)   ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
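/*
 * L2T() maps a packet length L to the time (in psched ticks) needed to
 * transmit L bytes at the configured rate: the rate table built by
 * userspace holds precomputed transmit times indexed by L >> cell_log.
 * Illustrative example (numbers not from this file): with cell_log = 3,
 * a 1500 byte packet looks up slot 1500 >> 3 = 187.  L2T_P() does the
 * same against the peak-rate table.
 */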
#define POL_TAB_MASK     15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
static u32 police_idx_gen;
static DEFINE_RWLOCK(police_lock);
static struct tcf_hashinfo police_hash_info = {
	.htab	=	tcf_police_ht,
	.hmask	=	POL_TAB_MASK,
	.lock	=	&police_lock,
};
/* old policer structure from before tc actions */
struct tc_police_compat
{
	u32			index;
	int			action;
	u32			limit;
	u32			burst;
	u32			mtu;
	struct tc_ratespec	rate;
	struct tc_ratespec	peakrate;
};
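/*
 * Userspace that predates tc actions passes this shorter layout, which
 * lacks the refcnt/bindcnt/capab tail of struct tc_police.  The locate
 * functions below accept either size via their RTA_PAYLOAD() checks
 * against both structures.
 */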
/* Each policer is serialized by its individual spinlock */
#ifdef CONFIG_NET_CLS_ACT
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
				 int type, struct tc_action *a)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&police_lock);

	s_i = cb->args[0];

	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];

		for (; p; p = p->tcfc_next) {
			index++;
			/* Skip entries already dumped by a previous call. */
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = index;
			r = (struct rtattr *)skb_tail_pointer(skb);
			RTA_PUT(skb, a->order, 0, NULL);
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, r);
				goto done;
			}
			r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
			n_i++;
		}
	}
done:
	read_unlock(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	nlmsg_trim(skb, r);
	goto done;
}
#endif
void tcf_police_destroy(struct tcf_police *p)
{
	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
	struct tcf_common **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == &p->common) {
			write_lock_bh(&police_lock);
			*p1p = p->tcf_next;
			write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->tcf_bstats,
					   &p->tcf_rate_est);
#endif
			if (p->tcfp_R_tab)
				qdisc_put_rtab(p->tcfp_R_tab);
			if (p->tcfp_P_tab)
				qdisc_put_rtab(p->tcfp_P_tab);
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}
#ifdef CONFIG_NET_CLS_ACT
static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
				 struct tc_action *a, int ovr, int bind)
{
	unsigned int h;
	int ret = 0, err;
	struct rtattr *tb[TCA_POLICE_MAX];
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	int size;

	if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
		return -EINVAL;

	if (tb[TCA_POLICE_TBF-1] == NULL)
		return -EINVAL;
	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;
	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

	if (tb[TCA_POLICE_RESULT-1] != NULL &&
	    RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
		return -EINVAL;
	if (tb[TCA_POLICE_AVRATE-1] != NULL &&
	    RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
		return -EINVAL;

	if (parm->index) {
		struct tcf_common *pc;

		pc = tcf_hash_lookup(parm->index, &police_hash_info);
		if (pc != NULL) {
			police = to_police(pc);
			if (bind) {
				police->tcf_bindcnt += 1;
				police->tcf_refcnt += 1;
			}
			if (ovr)
				goto override;
			return ret;
		}
	}

	police = kzalloc(sizeof(*police), GFP_KERNEL);
	if (police == NULL)
		return -ENOMEM;
	ret = ACT_P_CREATED;
	police->tcf_refcnt = 1;
	spin_lock_init(&police->tcf_lock);
	police->tcf_stats_lock = &police->tcf_lock;
	if (bind)
		police->tcf_bindcnt = 1;
override:
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
		if (R_tab == NULL)
			goto failure;
		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE-1]);
			if (P_tab == NULL) {
				qdisc_put_rtab(R_tab);
				goto failure;
			}
		}
	}
	/* No failure allowed after this point */
	spin_lock_bh(&police->tcf_lock);
	if (R_tab != NULL) {
		qdisc_put_rtab(police->tcfp_R_tab);
		police->tcfp_R_tab = R_tab;
	}
	if (P_tab != NULL) {
		qdisc_put_rtab(police->tcfp_P_tab);
		police->tcfp_P_tab = P_tab;
	}

	if (tb[TCA_POLICE_RESULT-1])
		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
	police->tcfp_toks = police->tcfp_burst = parm->burst;
	police->tcfp_mtu = parm->mtu;
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (police->tcfp_R_tab)
			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
	}
	if (police->tcfp_P_tab)
		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
	police->tcf_action = parm->action;

#ifdef CONFIG_NET_ESTIMATOR
	if (tb[TCA_POLICE_AVRATE-1])
		police->tcfp_ewma_rate =
			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
	if (est)
		gen_replace_estimator(&police->tcf_bstats,
				      &police->tcf_rate_est,
				      police->tcf_stats_lock, est);
#endif

	spin_unlock_bh(&police->tcf_lock);
	if (ret != ACT_P_CREATED)
		return ret;

	police->tcfp_t_c = psched_get_time();
	police->tcf_index = parm->index ? parm->index :
		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
	write_lock_bh(&police_lock);
	police->tcf_next = tcf_police_ht[h];
	tcf_police_ht[h] = &police->common;
	write_unlock_bh(&police_lock);

	a->priv = police;
	return ret;

failure:
	if (ret == ACT_P_CREATED)
		kfree(police);
	return err;
}
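/*
 * Typical trigger for the function above (illustrative command, not part
 * of this file):
 *	tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *		police rate 1mbit burst 10k mtu 1500 drop
 * tc encodes the parameters as a TCA_POLICE_TBF attribute, which
 * tcf_act_police_locate() parses to create or update a policer.
 */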
static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
	struct tcf_police *p = a->priv;

	if (p != NULL)
		return tcf_police_release(p, bind);
	return 0;
}
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = a->priv;
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&police->tcf_lock);

	police->tcf_bstats.bytes += skb->len;
	police->tcf_bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}
#endif

	if (skb->len <= police->tcfp_mtu) {
		if (police->tcfp_R_tab == NULL) {
			/* No rate configured: always return the result code. */
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, police->tcfp_t_c,
					    police->tcfp_burst);
		if (police->tcfp_P_tab) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
				ptoks = (long)L2T_P(police, police->tcfp_mtu);
			ptoks -= L2T_P(police, skb->len);
		}
		toks += police->tcfp_toks;
		if (toks > (long)police->tcfp_burst)
			toks = police->tcfp_burst;
		toks -= L2T(police, skb->len);
		if ((toks|ptoks) >= 0) {
			/* Packet conforms: commit the new bucket state. */
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}
	}

	police->tcf_qstats.overlimits++;
	spin_unlock(&police->tcf_lock);
	return police->tcf_action;
}
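/*
 * Token bucket arithmetic above, by example (illustrative numbers): at
 * 1 Mbit/s, L2T(1500) is the time to send 1500 bytes, i.e. 12 ms of
 * credit, and a burst of 10 kbytes equals 80 ms.  After a long idle
 * period toks saturates at the burst, so at most six back-to-back
 * 1500 byte packets conform ((toks|ptoks) >= 0) before packets start
 * returning tcf_action (e.g. TC_ACT_SHOT when configured to drop).
 */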
static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = a->priv;
	struct tc_police opt;

	/* Clear the whole struct so padding and unset fields (limit,
	 * capab) cannot leak stack memory to userspace. */
	memset(&opt, 0, sizeof(opt));
	opt.index = police->tcf_index;
	opt.action = police->tcf_action;
	opt.mtu = police->tcfp_mtu;
	opt.burst = police->tcfp_burst;
	opt.refcnt = police->tcf_refcnt - ref;
	opt.bindcnt = police->tcf_bindcnt - bind;
	if (police->tcfp_R_tab)
		opt.rate = police->tcfp_R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (police->tcfp_P_tab)
		opt.peakrate = police->tcfp_P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (police->tcfp_result)
		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
			&police->tcfp_result);
#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate)
		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
#endif
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.hinfo		=	&police_hash_info,
	.type		=	TCA_ID_POLICE,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_act_police,
	.dump		=	tcf_act_police_dump,
	.cleanup	=	tcf_act_police_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_act_police_locate,
	.walk		=	tcf_act_police_walker
};
static int __init
police_init_module(void)
{
	return tcf_register_action(&act_police_ops);
}

static void __exit
police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);
#else /* CONFIG_NET_CLS_ACT */
static struct tcf_common *tcf_police_lookup(u32 index)
{
	struct tcf_hashinfo *hinfo = &police_hash_info;
	struct tcf_common *p;

	read_lock(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock(hinfo->lock);

	return p;
}
static u32 tcf_police_new_index(void)
{
	u32 *idx_gen = &police_idx_gen;
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_police_lookup(val));

	return (*idx_gen = val);
}
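/*
 * Index 0 in a request means "allocate one for me", so the generator
 * above skips 0 when it wraps and probes the hash table to avoid
 * handing out an index that is still in use.
 */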
struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{
	unsigned int h;
	struct tcf_police *police;
	struct rtattr *tb[TCA_POLICE_MAX];
	struct tc_police *parm;
	int size;

	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
		return NULL;

	if (tb[TCA_POLICE_TBF-1] == NULL)
		return NULL;
	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return NULL;

	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

	if (parm->index) {
		struct tcf_common *pc;

		pc = tcf_police_lookup(parm->index);
		if (pc) {
			police = to_police(pc);
			police->tcf_refcnt++;
			return police;
		}
	}
	police = kzalloc(sizeof(*police), GFP_KERNEL);
	if (unlikely(!police))
		return NULL;

	police->tcf_refcnt = 1;
	spin_lock_init(&police->tcf_lock);
	police->tcf_stats_lock = &police->tcf_lock;
	if (parm->rate.rate) {
		police->tcfp_R_tab =
			qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
		if (police->tcfp_R_tab == NULL)
			goto failure;
		if (parm->peakrate.rate) {
			police->tcfp_P_tab =
				qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE-1]);
			if (police->tcfp_P_tab == NULL)
				goto failure;
		}
	}
	if (tb[TCA_POLICE_RESULT-1]) {
		if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
			goto failure;
		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
	}
#ifdef CONFIG_NET_ESTIMATOR
	if (tb[TCA_POLICE_AVRATE-1]) {
		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
			goto failure;
		police->tcfp_ewma_rate =
			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
	}
#endif
	police->tcfp_toks = police->tcfp_burst = parm->burst;
	police->tcfp_mtu = parm->mtu;
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (police->tcfp_R_tab)
			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
	}
	if (police->tcfp_P_tab)
		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
	police->tcfp_t_c = psched_get_time();
	police->tcf_index = parm->index ? parm->index :
		tcf_police_new_index();
	police->tcf_action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
	if (est)
		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
				  police->tcf_stats_lock, est);
#endif
	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
	write_lock_bh(&police_lock);
	police->tcf_next = tcf_police_ht[h];
	tcf_police_ht[h] = &police->common;
	write_unlock_bh(&police_lock);

	return police;

failure:
	if (police->tcfp_P_tab)
		qdisc_put_rtab(police->tcfp_P_tab);
	if (police->tcfp_R_tab)
		qdisc_put_rtab(police->tcfp_R_tab);
	kfree(police);
	return NULL;
}
int tcf_police(struct sk_buff *skb, struct tcf_police *police)
{
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&police->tcf_lock);

	police->tcf_bstats.bytes += skb->len;
	police->tcf_bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}
#endif

	if (skb->len <= police->tcfp_mtu) {
		if (police->tcfp_R_tab == NULL) {
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, police->tcfp_t_c,
					    police->tcfp_burst);
		if (police->tcfp_P_tab) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
				ptoks = (long)L2T_P(police, police->tcfp_mtu);
			ptoks -= L2T_P(police, skb->len);
		}
		toks += police->tcfp_toks;
		if (toks > (long)police->tcfp_burst)
			toks = police->tcfp_burst;
		toks -= L2T(police, skb->len);
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}
	}

	police->tcf_qstats.overlimits++;
	spin_unlock(&police->tcf_lock);
	return police->tcf_action;
}
EXPORT_SYMBOL(tcf_police);
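/*
 * Without CONFIG_NET_CLS_ACT there is no generic action framework, so
 * classifiers call tcf_police() directly; hence the export above.
 */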
int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_police opt;

	/* Clear unset fields (limit, refcnt, bindcnt, capab) so no
	 * uninitialized stack data reaches userspace. */
	memset(&opt, 0, sizeof(opt));
	opt.index = police->tcf_index;
	opt.action = police->tcf_action;
	opt.mtu = police->tcfp_mtu;
	opt.burst = police->tcfp_burst;
	if (police->tcfp_R_tab)
		opt.rate = police->tcfp_R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (police->tcfp_P_tab)
		opt.peakrate = police->tcfp_P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (police->tcfp_result)
		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
			&police->tcfp_result);
#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate)
		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
#endif
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
{
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, police->tcf_stats_lock,
					 &d) < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
#endif
	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
#endif /* CONFIG_NET_CLS_ACT */