/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows a priority to be assigned to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
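/*
 * Illustration (not from the original source): assuming the usual
 * TC_HTB_MAXDEPTH of 8, a root class sits at level 7, an interior child
 * of it at level 6, and so on, while a leaf is always level 0.  The
 * per-level arrays below (rows, wait queues, event caches) are indexed
 * by exactly these numbers.
 */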
#define HTB_HSIZE 16		/* classid hash size */
#define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
/* used internally to keep status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
	/* general class parameters */
	u32 classid;
	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_htb_xstats xstats;	/* our special stats */
	int refcnt;		/* usage count of this class */

	/* topology */
	int level;		/* our level (see above) */
	struct htb_class *parent;	/* parent class */
	struct hlist_node hlist;	/* classid hash list item */
	struct list_head sibling;	/* sibling list item */
	struct list_head children;	/* children list */

	union {
		struct htb_class_leaf {
			struct Qdisc *q;
			int prio;
			int aprio;
			int quantum;
			int deficit[TC_HTB_MAXDEPTH];
			struct list_head drop_list;
		} leaf;
		struct htb_class_inner {
			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
			/* When class changes from state 1->2 and disconnects from
			   parent's feed then we lose the ptr value and start from
			   the first child again. Here we store classid of the
			   last valid ptr (used when ptr is NULL). */
			u32 last_ptr_id[TC_HTB_NUMPRIO];
		} inner;
	} un;
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */
	struct rb_node pq_node;	/* node for event queue */
	psched_time_t pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */

	/* class attached filters */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int warned;		/* only one warning about non-work-conserving .. */

	/* token bucket parameters */
	struct qdisc_rate_table *rate;	/* rate table of the class itself */
	struct qdisc_rate_table *ceil;	/* ceiling rate (limits borrows too) */
	long buffer, cbuffer;	/* token bucket depth/rate */
	psched_tdiff_t mbuffer;	/* max wait time */
	long tokens, ctokens;	/* current number of tokens */
	psched_time_t t_c;	/* checkpoint time */

	int prio;		/* backup copy for the parent-to-leaf return */
	int quantum;		/* eventually a full replacement of the */
				/* un.leaf originals should be done */
};
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
		       int size)
{
	int slot = size >> rate->rate.cell_log;
	if (slot > 255)
		return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
	return rate->data[slot];
}
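/*
 * Worked example (illustrative, values assumed): with rate.cell_log == 3,
 * a 1000-byte packet falls into slot 1000 >> 3 == 125 and L2T() simply
 * returns the precomputed transmission time rate->data[125].  A 64 KB
 * GSO packet would yield a slot far beyond 255, so the overflow branch
 * scales data[255] by the high slot bits instead of indexing past the
 * 256-entry table.
 */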
struct htb_sched {
	struct list_head root;	/* root classes list */
	struct hlist_head hash[HTB_HSIZE];	/* hashed by classid */
	struct list_head drops[TC_HTB_NUMPRIO];	/* active leaves (for drops) */

	/* self list - roots of self generating tree */
	struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	int row_mask[TC_HTB_MAXDEPTH];
	struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
	u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

	/* self wait list - roots of wait PQs per row */
	struct rb_root wait_pq[TC_HTB_MAXDEPTH];

	/* time of nearest event per level (row) */
	psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

	/* whether we hit a non-work-conserving class during this dequeue; we */
	int nwc_hit;		/* use this to disable mindelay complaint in dequeue */

	int defcls;		/* class where unclassified flows go to */

	/* filters for qdisc itself */
	struct tcf_proto *filter_list;
	int filter_cnt;

	int rate2quantum;	/* quant = rate / rate2quantum */
	psched_time_t now;	/* cached dequeue time */
	struct qdisc_watchdog watchdog;

	/* non-shaped skbs; let them go directly thru */
	struct sk_buff_head direct_queue;
	int direct_qlen;	/* max qlen of above */

	long direct_pkts;
};
/* compute hash of size HTB_HSIZE for given handle */
static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
#error "Declare new hash for your HTB_HSIZE"
#endif
	h ^= h >> 8;	/* stolen from cbq_hash */
	h ^= h >> 4;
	return h & 0xf;
}
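/*
 * Example (illustrative): classid 0x00010002 folds as
 * 0x00010002 ^ 0x00000100 = 0x00010102, then ^ 0x00001010 = 0x00011112,
 * and the final mask keeps only the low 4 bits, i.e. bucket 2 of the
 * HTB_HSIZE == 16 buckets.
 */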
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *p;
	struct htb_class *cl;

	if (TC_H_MAJ(handle) != sch->handle)
		return NULL;

	hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
		if (cl->classid == handle)
			return cl;
	}
	return NULL;
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in priority.
 * Then we examine filters in qdisc and in inner nodes (if a higher
 * filter points to the inner node). If we end up with classid MAJOR:0
 * we enqueue the skb into a special internal fifo (direct). These
 * packets then go directly thru. If we still have no valid leaf we try
 * to use the MAJOR:default leaf. If that is still unsuccessful, we
 * finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting the class by setting skb->priority to a valid
	   classid; note that nfmark can be used too by attaching filter
	   fw with no rules in it */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
		return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if ((cl = (void *)res.class) == NULL) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			if ((cl = htb_find(res.classid, sch)) == NULL)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = cl->filter_list;
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is a safe bet */
	return cl;
}
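/*
 * Classification walk-through (illustrative): on qdisc 1:, a packet with
 * skb->priority set to 1:10 goes straight to leaf 1:10 when it exists;
 * otherwise the qdisc filter chain runs, possibly continuing into an
 * inner class's own filters, and a final miss falls back to 1:<defcls>
 * or, failing that, to the direct queue.
 */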
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->classid > c->classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode in cl->pq_key microseconds. Make sure that
 * the class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, long delay)
{
	struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}
/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to the row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
	}
}
/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from the row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;

	while (mask) {
		int prio = ffz(~mask);

		mask &= ~(1 << prio);
		if (q->ptr[cl->level][prio] == cl->node + prio)
			htb_next_rb_node(q->ptr[cl->level] + prio);

		htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
		if (!q->row[cl->level][prio].rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so
				   reset the bit in mask as parent is
				   already ok */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.ptr[prio] == cl->node + prio) {
				/* we are removing child which is pointed to
				   from parent feed - forget the pointer but
				   remember classid */
				p->un.inner.last_ptr_id[prio] = cl->classid;
				p->un.inner.ptr[prio] = NULL;
			}

			htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

			if (!p->un.inner.feed[prio].rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}
#if HTB_HYSTERESIS
static inline long htb_lowater(const struct htb_class *cl)
{
	return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
	return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
}
#else
#define htb_lowater(cl)	(0)
#define htb_hiwater(cl)	(0)
#endif
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * Also it is worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be hysteresis over the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
	long toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
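/*
 * Hysteresis example (illustrative): a class currently in HTB_CAN_SEND
 * with buffer == 10000 keeps that mode until tokens sink below -10000
 * (htb_hiwater() == -buffer), while a blocked class only leaves
 * HTB_CANT_SEND once ctokens recovers to >= 0 (htb_lowater() drops to 0
 * in that mode).  The overlap suppresses rapid mode flapping around
 * zero tokens.
 */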
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed list linkage, change
 * the mode and add the class to the wait event queue if appropriate.
 * The new mode should be different from the old one and cl->pq_key has
 * to be valid if changing to a mode other than HTB_CAN_SEND (see
 * htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns the (new) priority of the leaf and activates the feed
 * chain for the prio. It can be safely called on an already active leaf.
 * It also adds the leaf into the droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->un.leaf.aprio);
	}
}
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that the leaf is active. In other words, it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	BUG_TRAP(cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__skb_queue_tail(&q->direct_queue, skb);
			q->direct_pkts++;
		} else {
			kfree_skb(skb);
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
#endif
	} else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else {
		cl->bstats.packets +=
			skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
		cl->bstats.bytes += skb->len;
		htb_activate(q, cl);
	}

	sch->q.qlen++;
	sch->bstats.packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	sch->bstats.bytes += skb->len;
	return NET_XMIT_SUCCESS;
}
/* TODO: requeuing a packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int ret = NET_XMIT_SUCCESS;
	struct htb_class *cl = htb_classify(skb, sch, &ret);
	struct sk_buff *tskb;

	if (cl == HTB_DIRECT || !cl) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen && cl) {
			__skb_queue_head(&q->direct_queue, skb);
		} else {
			__skb_queue_head(&q->direct_queue, skb);
			tskb = __skb_dequeue_tail(&q->direct_queue);
			kfree_skb(tskb);
			sch->qstats.drops++;
			return NET_XMIT_CN;
		}
	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
		   NET_XMIT_SUCCESS) {
		sch->qstats.drops++;
		cl->qstats.drops++;
		return NET_XMIT_DROP;
	} else
		htb_activate(q, cl);

	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that a packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to the ceil leaky bucket for
 * the leaf and all ancestors and to the rate bucket for ancestors at
 * levels "level" and higher. It also handles a possible change of mode
 * resulting from the update. Note that mode can also increase here
 * (MAY_BORROW to CAN_SEND) because we can use a more precise clock than
 * the event queue here. In such a case we remove the class from the
 * event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = skb->len;
	long toks, diff;
	enum htb_cmode old_mode;
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
	if (toks > cl->B) toks = cl->B; \
	toks -= L2T(cl, cl->R, bytes); \
	if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
	cl->T = toks
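/*
 * Expansion example (illustrative): HTB_ACCNT(tokens, buffer, rate)
 * refills the class's own bucket by the elapsed time "diff", caps it at
 * the bucket depth cl->buffer, pays the packet's transmission time via
 * L2T(), clamps the debt at roughly -cl->mbuffer, and stores the result
 * back into cl->tokens.  The ctokens/cbuffer/ceil triple does the same
 * for the ceiling bucket.
 */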
	while (cl) {
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			HTB_ACCNT(tokens, buffer, rate);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		HTB_ACCNT(ctokens, cbuffer, ceil);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update byte stats except for leaves which are already updated */
		if (cl->level) {
			cl->bstats.bytes += bytes;
			cl->bstats.packets += skb_is_gso(skb) ?
					skb_shinfo(skb)->gso_segs : 1;
		}
		cl = cl->parent;
	}
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * time of the next pending event (0 for no event in pq).
 * Note: Applied are events that have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level)
{
	int i;

	for (i = 0; i < 500; i++) {
		struct htb_class *cl;
		long diff;
		struct rb_node *p = rb_first(&q->wait_pq[level]);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, q->wait_pq + level);
		diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}
	if (net_ratelimit())
		printk(KERN_WARNING "htb: too many events !\n");
	return q->now + PSCHED_TICKS_PER_SEC / 10;
}
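/*
 * Note (illustrative): the 500-iteration cap bounds the work done per
 * invocation; if more events are still pending, the function reports a
 * next-event time 1/10 s ahead so that dequeue makes progress and the
 * remaining events are picked up on a later pass.
 */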
/* Returns class->node+prio from the id-tree where the class's id is >= id.
   NULL is returned if no such id exists. */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);
		if (id == cl->classid)
			return n;

		if (id > cl->classid) {
			n = n->rb_right;
		} else {
			r = n;
			n = n->rb_left;
		}
	}
	return r;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf that the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
					 struct rb_node **pptr, u32 *pid)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_TRAP(tree->rb_node);
	sp->root = tree->rb_node;
	sp->pptr = pptr;
	sp->pid = pid;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			   the original or next ptr */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				   can become out of date quickly */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				BUG_TRAP(*sp->pptr);
				if (!*sp->pptr)
					return NULL;
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			(++sp)->root = cl->un.inner.feed[prio].rb_node;
			sp->pptr = cl->un.inner.ptr + prio;
			sp->pid = cl->un.inner.last_ptr_id + prio;
		}
	}
	BUG_TRAP(0);
	return NULL;
}
/* dequeues packet at given priority and level; call only if
   you are sure that there is an active class at prio/level */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
					int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	do {
next:
		BUG_TRAP(cl);
		if (!cl)
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		   qdisc drops packets in enqueue routine or if someone used
		   graft operation on the leaf since last dequeue;
		   simply deactivate and skip such class */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(q->row[level] + prio,
					       prio, q->ptr[level] + prio,
					       q->last_ptr_id[level] + prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;
		if (!cl->warned) {
			printk(KERN_WARNING
			       "htb: class %X isn't work conserving ?!\n",
			       cl->classid);
			cl->warned = 1;
		}
		q->nwc_hit++;
		htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
				  ptr[0]) + prio);
		cl = htb_lookup_leaf(q->row[level] + prio, prio,
				     q->ptr[level] + prio,
				     q->last_ptr_id[level] + prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
					  ptr[0]) + prio);
		}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
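/*
 * DRR illustration: with quantum == 1500, a leaf keeps its turn while its
 * per-level deficit stays non-negative; once a dequeued packet drives it
 * below zero, one quantum is added back and the round-robin pointer
 * advances, so active leaves with equal quanta receive equal shares of
 * dequeue opportunities over time.
 */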
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = NULL;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	psched_time_t next_event;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __skb_dequeue(&q->direct_queue);
	if (skb != NULL) {
		sch->flags &= ~TCQ_F_THROTTLED;
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = psched_get_time();

	next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
	q->nwc_hit = 0;
	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		psched_time_t event;

		if (q->now >= q->near_ev_cache[level]) {
			event = htb_do_events(q, level);
			if (!event)
				event = q->now + PSCHED_TICKS_PER_SEC;
			q->near_ev_cache[level] = event;
		} else
			event = q->near_ev_cache[level];

		if (event && next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);
			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL)) {
				sch->q.qlen--;
				sch->flags &= ~TCQ_F_THROTTLED;
				goto fin;
			}
		}
	}
	sch->qstats.overlimits++;
	qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
	return skb;
}
/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int prio;

	for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
		struct list_head *p;
		list_for_each(p, q->drops + prio) {
			struct htb_class *cl = list_entry(p, struct htb_class,
							  un.leaf.drop_list);
			unsigned int len;
			if (cl->un.leaf.q->ops->drop &&
			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
				sch->q.qlen--;
				if (!cl->un.leaf.q->q.qlen)
					htb_deactivate(q, cl);
				return len;
			}
		}
	}
	return 0;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__skb_queue_purge(&q->direct_queue);
	sch->q.qlen = 0;
	memset(q->row, 0, sizeof(q->row));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	memset(q->wait_pq, 0, sizeof(q->wait_pq));
	memset(q->ptr, 0, sizeof(q->ptr));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_HTB_INIT];
	struct tc_htb_glob *gopt;
	int i;
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
	    tb[TCA_HTB_INIT - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
		return -EINVAL;
	}
	gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
	if (gopt->version != HTB_VER >> 16) {
		printk(KERN_ERR
		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&q->root);
	for (i = 0; i < HTB_HSIZE; i++)
		INIT_HLIST_HEAD(q->hash + i);
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_watchdog_init(&q->watchdog, sch);
	skb_queue_head_init(&q->direct_queue);

	q->direct_qlen = sch->dev->tx_queue_len;
	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
		q->direct_qlen = 2;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
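/*
 * Usage sketch (illustrative, standard tc frontend assumed):
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10
 *	tc class add dev eth0 parent 1: classid 1:1 htb rate 1mbit ceil 2mbit
 *
 * "default 20" is what htb_init() above receives as gopt->defcls and
 * "r2q 10" as gopt->rate2quantum.
 */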
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_glob gopt;
	spin_lock_bh(&sch->dev->queue_lock);
	gopt.direct_pkts = q->direct_pkts;

	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
	rta->rta_len = skb_tail_pointer(skb) - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	nlmsg_trim(skb, skb_tail_pointer(skb));
	return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;
	struct tc_htb_opt opt;

	spin_lock_bh(&sch->dev->queue_lock);
	tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	memset(&opt, 0, sizeof(opt));

	opt.rate = cl->rate->rate;
	opt.buffer = cl->buffer;
	opt.ceil = cl->ceil->rate;
	opt.cbuffer = cl->cbuffer;
	opt.quantum = cl->un.leaf.quantum;
	opt.prio = cl->un.leaf.prio;
	opt.level = cl->level;
	RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb_tail_pointer(skb) - b;
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;
rtattr_failure:
	spin_unlock_bh(&sch->dev->queue_lock);
	nlmsg_trim(skb, b);
	return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (!cl->level && cl->un.leaf.q)
		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
	cl->xstats.tokens = cl->tokens;
	cl->xstats.ctokens = cl->ctokens;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					     cl->classid))
		    == NULL)
			return -ENOBUFS;
		sch_tree_lock(sch);
		if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
			qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
			qdisc_reset(*old);
		}
		sch_tree_unlock(sch);
		return 0;
	}
	return -ENOENT;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;

	if (!(cl->parent->children.next == &cl->sibling &&
	      cl->parent->children.prev == &cl->sibling))
		/* not the last child */
		return 0;

	return 1;
}
static void htb_parent_to_leaf(struct htb_class *cl, struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->un.leaf.quantum = parent->quantum;
	parent->un.leaf.prio = parent->prio;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = psched_get_time();
	parent->cmode = HTB_CAN_SEND;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	struct htb_sched *q = qdisc_priv(sch);

	if (!cl->level) {
		BUG_TRAP(cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_put_rtab(cl->rate);
	qdisc_put_rtab(cl->ceil);

	tcf_destroy_chain(cl->filter_list);

	while (!list_empty(&cl->children))
		htb_destroy_class(sch, list_entry(cl->children.next,
						  struct htb_class, sibling));

	/* note: this delete may happen twice (see htb_delete) */
	hlist_del_init(&cl->hlist);
	list_del(&cl->sibling);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

	kfree(cl);
}
/* always called under BH & queue lock */
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on it (without an Oops). */
	tcf_destroy_chain(q->filter_list);

	while (!list_empty(&q->root))
		htb_destroy_class(sch, list_entry(q->root.next,
						  struct htb_class, sibling));

	__skb_queue_purge(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	unsigned int qlen;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	// TODO: why not allow deleting a subtree? references? does the
	// tc subsystem guarantee us that in htb_destroy it holds no class
	// refs so that we can remove children safely there?
	if (!list_empty(&cl->children) || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  cl->parent->classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		qlen = cl->un.leaf.q->q.qlen;
		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
	}

	/* delete from hash and active; remainder in destroy_class */
	hlist_del_init(&cl->hlist);

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (last_child)
		htb_parent_to_leaf(cl, new_q);

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct rtattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct rtattr *opt = tca[TCA_OPTIONS - 1];
	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct rtattr *tb[TCA_HTB_RTAB];
	struct tc_htb_opt *hopt;

	/* extract all subattrs from opt attr */
	if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
	    tb[TCA_HTB_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);

	rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
	ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
	if (!rtab || !ctab)
		goto failure;
	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct rtattr		rta;
			struct gnet_estimator	opt;
		} est = {
			.rta = {
				.rta_len	= RTA_LENGTH(sizeof(est.opt)),
				.rta_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle)
		    || htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			printk(KERN_ERR "htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
			goto failure;

		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock,
				  tca[TCA_RATE-1] ? : &est.rta);
		cl->refcnt = 1;
		INIT_LIST_HEAD(&cl->sibling);
		INIT_HLIST_NODE(&cl->hlist);
		INIT_LIST_HEAD(&cl->children);
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		   so that it can't be used inside of sch_tree_lock
		   -- thanks to Karlis Peisenieks */
		new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = hopt->buffer;
		cl->ctokens = hopt->cbuffer;
		cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;	/* 1min */
		cl->t_c = psched_get_time();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
		list_add_tail(&cl->sibling,
			      parent ? &parent->children : &q->root);
	} else {
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      &sch->dev->queue_lock,
					      tca[TCA_RATE-1]);
		sch_tree_lock(sch);
	}
	/* it used to be a nasty bug here, we have to check that the node
	   is really a leaf before changing cl->un.leaf ! */
	if (!cl->level) {
		cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
		if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is small. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 1000;
		}
		if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
			printk(KERN_WARNING
			       "HTB: quantum of class %X is big. Consider r2q change.\n",
			       cl->classid);
			cl->un.leaf.quantum = 200000;
		}
		if (hopt->quantum)
			cl->un.leaf.quantum = hopt->quantum;
		if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;

		/* backup for htb_parent_to_leaf */
		cl->quantum = cl->un.leaf.quantum;
		cl->prio = cl->un.leaf.prio;
	}
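	/*
	 * Example (illustrative): with the default r2q of 10, a class rate
	 * of 1 Mbit/s (125000 B/s) yields quantum == 12500 bytes.  Rates
	 * under 80 kbit/s (10000 B/s) would compute a quantum below 1000
	 * and trip the clamp and warning above, which is why very slow
	 * classes usually want an explicit quantum.
	 */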
	cl->buffer = hopt->buffer;
	cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
	return err;
}
static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get
	   the class for other reasons, so we have to allow for it.
	   ----
	   19.6.2002 As Werner explained it is ok - bind filter is just
	   another way to "lock" the class - unlike "get" this lock can
	   be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	else
		q->filter_cnt++;
	return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
	else
		q->filter_cnt--;
}
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < HTB_HSIZE; i++) {
		struct hlist_node *p;
		struct htb_class *cl;

		hlist_for_each_entry(cl, p, q->hash + i, hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};
static struct Qdisc_ops htb_qdisc_ops = {
	.next		=	NULL,
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.requeue	=	htb_requeue,
	.drop		=	htb_drop,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.change		=	NULL /* htb_change */,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");