/* vim: ts=8 sw=8
 * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *                      HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *                      found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *                      helped a lot to locate nasty class stall bug
 *              Andi Kleen, Jamal Hadi, Bert Hubert
 *                      code review and helpful comments on shaping
 *              Tomasz Wrona, <tw@eter.tym.pl>
 *                      created test case so that I was able to fix nasty bug
 *              Wilfried Weissmann
 *                      spotted bug in dequeue code and helped with fix
 *              Jiri Fojtasek
 *                      fixed requeue routine
 *              and many others. thanks.
 *
 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rbtree.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf has ALWAYS level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
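
/* Example: a root class lives at level TC_HTB_MAXDEPTH-1 while its leaf
   descendants are at level 0; the per-level structures in struct htb_sched
   below (row[], wait_pq[], near_ev_cache[]) are all indexed by this class
   level, so each self-feed row and each event queue is kept per level. */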

#define HTB_HSIZE 16    /* classid hash size */
#define HTB_EWMAC 2     /* rate average over HTB_EWMAC*HTB_HSIZE sec */
#undef HTB_DEBUG        /* compile debugging support (activated by tc tool) */
#define HTB_RATECM 1    /* whether to use rate computer */
#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* debugging support; S is subsystem, these are defined:
  0 - netlink messages
  1 - enqueue
  2 - drop & requeue
  3 - dequeue main
  4 - dequeue one prio DRR part
  5 - dequeue class accounting
  6 - class overlimit status computation
  7 - hint tree
  8 - event queue
 10 - rate estimator
 11 - classifier
 12 - fast dequeue cache

 L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
 q->debug uint32 contains 16 2-bit fields, one per subsystem, starting
 from the LSB
 */
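
/* Example: to trace the dequeue main path (subsystem 3) at detail level 2
   while keeping everything else quiet, set q->debug = 2 << (2*3) = 0x80;
   HTB_DBG_COND(3,2) below then evaluates true and HTB_DBG_COND(3,3) false. */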
#ifdef HTB_DEBUG
#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
        printk(KERN_DEBUG FMT,##ARG)
#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
#define HTB_PASSQ q,
#define HTB_ARGQ struct htb_sched *q,
#define static
#undef __inline__
#define __inline__
#undef inline
#define inline
#define HTB_CMAGIC 0xFEFAFEF1
#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
                if ((N)->rb_color == -1) break; \
                rb_erase(N,R); \
                (N)->rb_color = -1; } while (0)
#else
#define HTB_DBG_COND(S,L) (0)
#define HTB_DBG(S,L,FMT,ARG...)
#define HTB_PASSQ
#define HTB_ARGQ
#define HTB_CHCL(cl)
#define htb_safe_rb_erase(N,R) rb_erase(N,R)
#endif


/* used internally to keep the status of a single class */
enum htb_cmode {
    HTB_CANT_SEND,              /* class can't send and can't borrow */
    HTB_MAY_BORROW,             /* class can't send but may borrow */
    HTB_CAN_SEND                /* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class
{
#ifdef HTB_DEBUG
        unsigned magic;
#endif
    /* general class parameters */
    u32 classid;
    struct gnet_stats_basic bstats;
    struct gnet_stats_queue qstats;
    struct gnet_stats_rate_est rate_est;
    struct tc_htb_xstats xstats;/* our special stats */
    int refcnt;                 /* usage count of this class */

#ifdef HTB_RATECM
    /* rate measurement counters */
    unsigned long rate_bytes,sum_bytes;
    unsigned long rate_packets,sum_packets;
#endif

    /* topology */
    int level;                  /* our level (see above) */
    struct htb_class *parent;   /* parent class */
    struct list_head hlist;     /* classid hash list item */
    struct list_head sibling;   /* sibling list item */
    struct list_head children;  /* children list */

    union {
            struct htb_class_leaf {
                    struct Qdisc *q;
                    int prio;
                    int aprio;
                    int quantum;
                    int deficit[TC_HTB_MAXDEPTH];
                    struct list_head drop_list;
            } leaf;
            struct htb_class_inner {
                    struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
                    struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
                    /* When class changes from state 1->2 and disconnects from
                       parent's feed then we lose the ptr value and start from
                       the first child again. Here we store the classid of the
                       last valid ptr (used when ptr is NULL). */
                    u32 last_ptr_id[TC_HTB_NUMPRIO];
            } inner;
    } un;
    struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
    struct rb_node pq_node;              /* node for event queue */
    unsigned long pq_key;       /* the same type as jiffies global */

    int prio_activity;          /* for which prios are we active */
    enum htb_cmode cmode;       /* current mode of the class */

    /* class attached filters */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int warned;         /* only one warning about non-work-conserving class .. */

    /* token bucket parameters */
    struct qdisc_rate_table *rate;      /* rate table of the class itself */
    struct qdisc_rate_table *ceil;      /* ceiling rate (limits borrows too) */
    long buffer,cbuffer;                /* token bucket depth/rate */
    long mbuffer;                       /* max wait time */
    long tokens,ctokens;                /* current number of tokens */
    psched_time_t t_c;                  /* checkpoint time */
};

/* TODO: maybe compute rate when size is too large .. or drop ? */
static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
        int size)
{
    int slot = size >> rate->rate.cell_log;
    if (slot > 255) {
        cl->xstats.giants++;
        slot = 255;
    }
    return rate->data[slot];
}
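
/* Example: with rate->rate.cell_log == 3 a 1000 byte packet maps to slot
   1000 >> 3 == 125 and rate->data[125] gives its transmission time at the
   class rate; sizes beyond slot 255 are clamped and counted as giants. */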

struct htb_sched
{
    struct list_head root;                      /* root classes list */
    struct list_head hash[HTB_HSIZE];           /* hashed by classid */
    struct list_head drops[TC_HTB_NUMPRIO];     /* active leaves (for drops) */

    /* self list - roots of self generating tree */
    struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    int row_mask[TC_HTB_MAXDEPTH];
    struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
    u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

    /* self wait list - roots of wait PQs per row */
    struct rb_root wait_pq[TC_HTB_MAXDEPTH];

    /* time of nearest event per level (row) */
    unsigned long near_ev_cache[TC_HTB_MAXDEPTH];

    /* cached value of jiffies in dequeue */
    unsigned long jiffies;

    /* whether we hit non-work conserving class during this dequeue; we use */
    int nwc_hit;        /* this to disable mindelay complaint in dequeue */

    int defcls;         /* class where unclassified flows go to */
    u32 debug;          /* subsystem debug levels */

    /* filters for qdisc itself */
    struct tcf_proto *filter_list;
    int filter_cnt;

    int rate2quantum;           /* quant = rate / rate2quantum */
    psched_time_t now;          /* cached dequeue time */
    struct timer_list timer;    /* send delay timer */
#ifdef HTB_RATECM
    struct timer_list rttim;    /* rate computer timer */
    int recmp_bucket;           /* which hash bucket to recompute next */
#endif

    /* non shaped skbs; let them go directly thru */
    struct sk_buff_head direct_queue;
    int direct_qlen;  /* max qlen of above */

    long direct_pkts;
};

/* compute hash of size HTB_HSIZE for given handle */
static __inline__ int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
 #error "Declare new hash for your HTB_HSIZE"
#endif
    h ^= h>>8;  /* stolen from cbq_hash */
    h ^= h>>4;
    return h & 0xf;
}
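
/* Example: classid 1:2 (handle 0x00010002) folds as
   0x00010002 ^ 0x00000100 = 0x00010102, then ^ 0x00001010 = 0x00011112,
   masked to bucket 2. */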

/* find class in global hash table using given handle */
static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct list_head *p;
        if (TC_H_MAJ(handle) != sch->handle)
                return NULL;

        list_for_each (p,q->hash+htb_hash(handle)) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                if (cl->classid == handle)
                        return cl;
        }
        return NULL;
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * we finish and return the direct queue.
 */
#define HTB_DIRECT (struct htb_class*)-1
static inline u32 htb_classid(struct htb_class *cl)
{
        return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow to select class by setting skb->priority to valid classid;
           note that nfmark can be used too by attaching filter fw with no
           rules in it */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;  /* X:0 (direct flow) selected */
        if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
                return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return HTB_DIRECT;
#endif
                if ((cl = (void*)res.class) == NULL) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;  /* X:0 (direct flow) */
                        if ((cl = htb_find(res.classid,sch)) == NULL)
                                break; /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl; /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
        if (!cl || cl->level)
                return HTB_DIRECT; /* bad default .. this is safe bet */
        return cl;
}
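
/* Usage note (usual tc syntax): after
     tc qdisc add dev eth0 root handle 1: htb default 20
   packets that match no filter are looked up as class 1:20 by the fallback
   above; if 1:20 does not exist or is not a leaf they take the direct path. */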

#ifdef HTB_DEBUG
static void htb_next_rb_node(struct rb_node **n);
#define HTB_DUMTREE(root,memb) if(root) { \
        struct rb_node *n = (root)->rb_node; \
        while (n->rb_left) n = n->rb_left; \
        while (n) { \
                struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
                printk(" %x",cl->classid); htb_next_rb_node (&n); \
        } }

static void htb_debug_dump (struct htb_sched *q)
{
        int i,p;
        printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
        /* rows */
        for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
                printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
                for (p=0;p<TC_HTB_NUMPRIO;p++) {
                        if (!q->row[i][p].rb_node) continue;
                        printk(" p%d:",p);
                        HTB_DUMTREE(q->row[i]+p,node[p]);
                }
                printk("\n");
        }
        /* classes */
        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *l;
                list_for_each (l,q->hash+i) {
                        struct htb_class *cl = list_entry(l,struct htb_class,hlist);
                        long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
                        printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
                                        "pa=%x f:",
                                cl->classid,cl->cmode,cl->tokens,cl->ctokens,
                                cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
                                cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
                        if (cl->level)
                        for (p=0;p<TC_HTB_NUMPRIO;p++) {
                                if (!cl->un.inner.feed[p].rb_node) continue;
                                printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
                                HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
                        }
                        printk("\n");
                }
        }
}
#endif
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that the class is not already on such a list for the given prio.
 */
static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
                struct htb_class *cl,int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
        if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if (*p) {
                struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
                HTB_CHCL(x);
        }
#endif
        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);
                HTB_CHCL(c);
                if (cl->classid > c->classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the class
 * will change its mode in "delay" microseconds (cl->pq_key then holds the
 * jiffy of that event). Make sure that the class is not already in the queue.
 */
static void htb_add_to_wait_tree (struct htb_sched *q,
                struct htb_class *cl,long delay,int debug_hint)
{
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
        HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
        if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
        HTB_CHCL(cl);
        if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
                printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
#endif
        cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
        if (cl->pq_key == q->jiffies)
                cl->pq_key++;

        /* update the nearest event cache */
        if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c; parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (time_after_eq(cl->pq_key, c->pq_key))
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
                        cl->classid,mask,q->row_mask[cl->level]);
        HTB_CHCL(cl);
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
        }
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
                struct htb_class *cl,int mask)
{
        int m = 0;
        HTB_CHCL(cl);
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node+prio)
                        htb_next_rb_node(q->ptr[cl->level]+prio);
                htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
                        cl->classid,mask,q->row_mask[cl->level],m);
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                HTB_CHCL(p);
                m = mask; while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use so
                                   reset the bit in mask as parent is already ok */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
                }
                HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity |= mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q,cl,mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m,mask = cl->prio_activity;
        HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
        HTB_CHCL(cl);

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask; mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node+prio) {
                                /* we are removing child which is pointed to from
                                   parent feed - forget the pointer but remember
                                   classid */
                                p->un.inner.last_ptr_id[prio] = cl->classid;
                                p->un.inner.ptr[prio] = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }
                HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
                                p->classid,p->prio_activity,mask,p->cmode);
                p->prio_activity &= ~mask;
                cl = p; p = cl->parent;
                HTB_CHCL(cl);
        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q,cl,mask);
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then *diff is updated to the time difference
 * from now to the time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static __inline__ enum htb_cmode
htb_class_mode(struct htb_class *cl,long *diff)
{
    long toks;

    if ((toks = (cl->ctokens + *diff)) < (
#if HTB_HYSTERESIS
            cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
#endif
            0)) {
            *diff = -toks;
            return HTB_CANT_SEND;
    }
    if ((toks = (cl->tokens + *diff)) >= (
#if HTB_HYSTERESIS
            cl->cmode == HTB_CAN_SEND ? -cl->buffer :
#endif
            0))
            return HTB_CAN_SEND;

    *diff = -toks;
    return HTB_MAY_BORROW;
}
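
/* Example of the hysteresis (with HTB_HYSTERESIS): a class already in
   HTB_CAN_SEND keeps that mode while tokens+diff >= -buffer, but a class
   in HTB_MAY_BORROW must climb all the way back to tokens+diff >= 0 before
   it may send again; this asymmetry is what limits mode flapping. */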

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed lists linkage, change
 * the mode and add the class to the wait event queue if appropriate. The
 * new mode should be different from the old one and cl->pq_key has to be
 * valid if changing to a mode other than HTB_CAN_SEND (see
 * htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl,diff);

        HTB_CHCL(cl);
        HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) { /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q,cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q,cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
        HTB_CHCL(cl);
        if (!cl->prio_activity) {
                cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
                htb_activate_prios(q,cl);
                list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static __inline__ void
htb_deactivate(struct htb_sched *q,struct htb_class *cl)
{
        BUG_TRAP(cl->prio_activity);
        HTB_CHCL(cl);
        htb_deactivate_prios(q,cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
    int ret;
    struct htb_sched *q = qdisc_priv(sch);
    struct htb_class *cl = htb_classify(skb,sch,&ret);

    if (cl == HTB_DIRECT) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen) {
            __skb_queue_tail(&q->direct_queue, skb);
            q->direct_pkts++;
        } else {
            kfree_skb(skb);
            sch->qstats.drops++;
            return NET_XMIT_DROP;
        }
#ifdef CONFIG_NET_CLS_ACT
    } else if (!cl) {
        if (ret == NET_XMIT_BYPASS)
                sch->qstats.drops++;
        kfree_skb (skb);
        return ret;
#endif
    } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->qstats.drops++;
        cl->qstats.drops++;
        return NET_XMIT_DROP;
    } else {
        cl->bstats.packets++; cl->bstats.bytes += skb->len;
        htb_activate (q,cl);
    }

    sch->q.qlen++;
    sch->bstats.packets++; sch->bstats.bytes += skb->len;
    HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

/* TODO: requeuing packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
    struct htb_sched *q = qdisc_priv(sch);
    int ret = NET_XMIT_SUCCESS;
    struct htb_class *cl = htb_classify(skb,sch, &ret);
    struct sk_buff *tskb;

    if (cl == HTB_DIRECT || !cl) {
        /* enqueue to helper queue */
        if (q->direct_queue.qlen < q->direct_qlen && cl) {
            __skb_queue_head(&q->direct_queue, skb);
        } else {
            __skb_queue_head(&q->direct_queue, skb);
            tskb = __skb_dequeue_tail(&q->direct_queue);
            kfree_skb (tskb);
            sch->qstats.drops++;
            return NET_XMIT_CN;
        }
    } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
        sch->qstats.drops++;
        cl->qstats.drops++;
        return NET_XMIT_DROP;
    } else
            htb_activate (q,cl);

    sch->q.qlen++;
    sch->qstats.requeues++;
    HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
    return NET_XMIT_SUCCESS;
}

static void htb_timer(unsigned long arg)
{
    struct Qdisc *sch = (struct Qdisc*)arg;
    sch->flags &= ~TCQ_F_THROTTLED;
    wmb();
    netif_schedule(sch->dev);
}

#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
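/* RT_GEN is a cheap EWMA step: R += D - R/HTB_EWMAC, then the delta D is
   cleared. In steady state R settles at HTB_EWMAC*D, i.e. HTB_EWMAC *
   HTB_HSIZE seconds worth of traffic, since each class is revisited every
   HTB_HSIZE ticks of the 1 Hz timer below; htb_dump_class_stats() divides
   by HTB_EWMAC*HTB_HSIZE to recover the per-second rate. */
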
static void htb_rate_timer(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc*)arg;
        struct htb_sched *q = qdisc_priv(sch);
        struct list_head *p;

        /* lock queue so that we can muck with it */
        HTB_QLOCK(sch);
        HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);

        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);

        /* scan and recompute one bucket at a time */
        if (++q->recmp_bucket >= HTB_HSIZE)
                q->recmp_bucket = 0;
        list_for_each (p,q->hash+q->recmp_bucket) {
                struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
                                cl->classid,cl->sum_bytes,cl->sum_packets);
                RT_GEN (cl->sum_bytes,cl->rate_bytes);
                RT_GEN (cl->sum_packets,cl->rate_packets);
        }
        HTB_QUNLOCK(sch);
}
#endif

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
                int level,int bytes)
{
        long toks,diff;
        enum htb_cmode old_mode;
        HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);

#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
        if (toks > cl->B) toks = cl->B; \
        toks -= L2T(cl, cl->R, bytes); \
        if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
        cl->T = toks
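
/* HTB_ACCNT(T,B,R) is one token bucket step for bucket T of depth B fed at
   rate R: refill by the elapsed time diff, clamp to the bucket size, charge
   the transmission time of this packet, and never let the deficit drop
   below -mbuffer so one huge burst cannot stall the class forever. */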

        while (cl) {
                HTB_CHCL(cl);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
                                       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
                                       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
#endif
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                if (cl->level >= level) {
                        if (cl->level == level) cl->xstats.lends++;
                        HTB_ACCNT (tokens,buffer,rate);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff; /* we moved t_c; update tokens */
                }
                HTB_ACCNT (ctokens,cbuffer,ceil);
                cl->t_c = q->now;
                HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);

                old_mode = cl->cmode; diff = 0;
                htb_change_class_mode(q,cl,&diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree (q,cl,diff,1);
                }

#ifdef HTB_RATECM
                /* update rate counters */
                cl->sum_bytes += bytes; cl->sum_packets++;
#endif

                /* update byte stats except for leaves which are already updated */
                if (cl->level) {
                        cl->bstats.bytes += bytes;
                        cl->bstats.packets++;
                }
                cl = cl->parent;
        }
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans the event queue for pending events and applies them. Returns the
 * number of jiffies to the next pending event (0 if there is no event in
 * the pq).
 * Note: applied are events that have cl->pq_key <= q->jiffies.
 */
static long htb_do_events(struct htb_sched *q,int level)
{
        int i;
        HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
                        level,q->wait_pq[level].rb_node,q->row_mask[level]);
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = q->wait_pq[level].rb_node;
                if (!p) return 0;
                while (p->rb_left) p = p->rb_left;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
                        HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
                        return cl->pq_key - q->jiffies;
                }
                htb_safe_rb_erase(p,q->wait_pq+level);
                diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
#ifdef HTB_DEBUG
                if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
                        if (net_ratelimit())
                                printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
                                       cl->classid, diff,
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
                                       q->now.tv_sec * 1000000ULL + q->now.tv_usec,
                                       cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
#else
                                       (unsigned long long) q->now,
                                       (unsigned long long) cl->t_c,
#endif
                                       q->jiffies);
                        diff = 1000;
                }
#endif
                htb_change_class_mode(q,cl,&diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree (q,cl,diff,2);
        }
        if (net_ratelimit())
                printk(KERN_WARNING "htb: too many events !\n");
        return HZ/10;
}

/* Returns the class->node+prio from the id-tree whose class id is >= id.
   NULL if no such node exists. */
static struct rb_node *
htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
                if (id == cl->classid) return n;

                if (id > cl->classid) {
                        n = n->rb_right;
                } else {
                        r = n;
                        n = n->rb_left;
                }
        }
        return r;
}
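
/* Example: for an id-tree holding classids {2,5,9}, id == 5 returns the
   node of 5, id == 6 returns the node of 9 and id == 10 returns NULL. */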

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf where the current feed pointer points to.
 */
static struct htb_class *
htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH],*sp = stk;

        BUG_TRAP(tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;

        for (i = 0; i < 65535; i++) {
                HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);

                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                           the original or next ptr */
                        *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
                }
                *sp->pid = 0; /* ptr is valid now so remove this hint as it
                                 can become out of date quickly */
                if (!*sp->pptr) { /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
                                htb_next_rb_node (sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
                        HTB_CHCL(cl);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr+prio;
                        sp->pid = cl->un.inner.last_ptr_id+prio;
                }
        }
        BUG_TRAP(0);
        return NULL;
}
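
/* The stack above holds one (root,ptr,id) triple per tree level, so its
   depth is bounded by TC_HTB_MAXDEPTH; the 65535-iteration guard only
   trips if the trees are corrupted. */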

/* dequeues packet at given priority and level; call only if
   you are sure that there is active class at prio/level */
static struct sk_buff *
htb_dequeue_tree(struct htb_sched *q,int prio,int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl,*start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
                        q->ptr[level]+prio,q->last_ptr_id[level]+prio);

        do {
next:
                BUG_TRAP(cl);
                if (!cl) return NULL;
                HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
                                prio,level,cl->classid,cl->un.leaf.deficit[level]);

                /* class can be empty - it is unlikely but can be true if leaf
                   qdisc drops packets in enqueue routine or if someone used
                   graft operation on the leaf since last dequeue;
                   simply deactivate and skip such class */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q,cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
                                        prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);

                        if (cl == start) /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
                        break;
                if (!cl->warned) {
                        printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
                        cl->warned = 1;
                }
                q->nwc_hit++;
                htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
                                q->last_ptr_id[level]+prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
                        HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
                                level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
                        cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
                        htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
                }
                /* this used to be after charge_class but this constellation
                   gives us slightly better performance */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate (q,cl);
                htb_charge_class (q,cl,level,skb->len);
        }
        return skb;
}

static void htb_delay_by(struct Qdisc *sch,long delay)
{
        struct htb_sched *q = qdisc_priv(sch);
        if (delay <= 0) delay = 1;
        if (unlikely(delay > 5*HZ)) {
                if (net_ratelimit())
                        printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
                delay = 5*HZ;
        }
        /* why don't we use jiffies here? because expires can be in the past */
        mod_timer(&q->timer, q->jiffies + delay);
        sch->flags |= TCQ_F_THROTTLED;
        sch->qstats.overlimits++;
        HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = NULL;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        long min_delay;
#ifdef HTB_DEBUG
        int evs_used = 0;
#endif

        q->jiffies = jiffies;
        HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
                        sch->q.qlen);

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
                sch->flags &= ~TCQ_F_THROTTLED;
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen) goto fin;
        PSCHED_GET_TIME(q->now);

        min_delay = LONG_MAX;
        q->nwc_hit = 0;
        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                long delay;
                if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
                        delay = htb_do_events(q,level);
                        q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
#ifdef HTB_DEBUG
                        evs_used++;
#endif
                } else
                        delay = q->near_ev_cache[level] - q->jiffies;

                if (delay && min_delay > delay)
                        min_delay = delay;
                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz (m);
                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q,prio,level);
                        if (likely(skb != NULL)) {
                                sch->q.qlen--;
                                sch->flags &= ~TCQ_F_THROTTLED;
                                goto fin;
                        }
                }
        }
#ifdef HTB_DEBUG
        if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
                if (min_delay == LONG_MAX) {
                        printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
                                        evs_used,q->jiffies,jiffies);
                        htb_debug_dump(q);
                } else
                        printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
                                        "too small rate\n",min_delay);
        }
#endif
        htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
fin:
        HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
        return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc* sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each (p,q->drops+prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                                (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate (q,cl);
                                return len;
                        }
                }
        }
        return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc* sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int i;
        HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);

        for (i = 0; i < HTB_HSIZE; i++) {
                struct list_head *p;
                list_for_each (p,q->hash+i) {
                        struct htb_class *cl = list_entry(p,struct htb_class,hlist);
                        if (cl->level)
                                memset(&cl->un.inner,0,sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
#ifdef HTB_DEBUG
                        cl->pq_node.rb_color = -1;
                        memset(cl->node,255,sizeof(cl->node));
#endif

                }
        }
        sch->flags &= ~TCQ_F_THROTTLED;
        del_timer(&q->timer);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row,0,sizeof(q->row));
        memset(q->row_mask,0,sizeof(q->row_mask));
        memset(q->wait_pq,0,sizeof(q->wait_pq));
        memset(q->ptr,0,sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);
}

static int htb_init(struct Qdisc *sch, struct rtattr *opt)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct rtattr *tb[TCA_HTB_INIT];
        struct tc_htb_glob *gopt;
        int i;
#ifdef HTB_DEBUG
        printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
                          HTB_VER >> 16,HTB_VER & 0xffff);
#endif
        if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
                        tb[TCA_HTB_INIT-1] == NULL ||
                        RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
                printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
        if (gopt->version != HTB_VER >> 16) {
                printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
                                HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
                return -EINVAL;
        }
        q->debug = gopt->debug;
        HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);

        INIT_LIST_HEAD(&q->root);
        for (i = 0; i < HTB_HSIZE; i++)
                INIT_LIST_HEAD(q->hash+i);
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops+i);

        init_timer(&q->timer);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = sch->dev->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;
        q->timer.function = htb_timer;
        q->timer.data = (unsigned long)sch;

#ifdef HTB_RATECM
        init_timer(&q->rttim);
        q->rttim.function = htb_rate_timer;
        q->rttim.data = (unsigned long)sch;
        q->rttim.expires = jiffies + HZ;
        add_timer(&q->rttim);
#endif
        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct htb_sched *q = qdisc_priv(sch);
        unsigned char *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_glob gopt;
        HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
        HTB_QLOCK(sch);
        gopt.direct_pkts = q->direct_pkts;

#ifdef HTB_DEBUG
        if (HTB_DBG_COND(0,2))
                htb_debug_dump(q);
#endif
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = q->debug;
        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
        rta->rta_len = skb->tail - b;
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, skb->tail - skb->data);
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        struct sk_buff *skb, struct tcmsg *tcm)
{
#ifdef HTB_DEBUG
        struct htb_sched *q = qdisc_priv(sch);
#endif
        struct htb_class *cl = (struct htb_class*)arg;
        unsigned char *b = skb->tail;
        struct rtattr *rta;
        struct tc_htb_opt opt;

        HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);

        HTB_QLOCK(sch);
        tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (!cl->level && cl->un.leaf.q)
                tcm->tcm_info = cl->un.leaf.q->handle;

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        memset (&opt,0,sizeof(opt));

        opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
        opt.level = cl->level;
        RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
        rta->rta_len = skb->tail - b;
        HTB_QUNLOCK(sch);
        return skb->len;
rtattr_failure:
        HTB_QUNLOCK(sch);
        skb_trim(skb, b - skb->data);
        return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct gnet_dump *d)
{
        struct htb_class *cl = (struct htb_class*)arg;

#ifdef HTB_RATECM
        cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
        cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
#endif

        if (!cl->level && cl->un.leaf.q)
                cl->qstats.qlen = cl->un.leaf.q->q.qlen;
        cl->xstats.tokens = cl->tokens;
        cl->xstats.ctokens = cl->ctokens;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
1380
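/* attach a new qdisc to a leaf class (a default pfifo is created when
 * new is NULL) and return the old one; inner classes are rejected */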
1381 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1382         struct Qdisc **old)
1383 {
1384         struct htb_class *cl = (struct htb_class*)arg;
1385
1386         if (cl && !cl->level) {
		if (new == NULL &&
		    (new = qdisc_create_dflt(sch->dev,
					     &pfifo_qdisc_ops)) == NULL)
			return -ENOBUFS;
1390                 sch_tree_lock(sch);
1391                 if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
1392                         if (cl->prio_activity)
1393                                 htb_deactivate (qdisc_priv(sch),cl);
1394
1395                         /* TODO: is it correct ? Why CBQ doesn't do it ? */
1396                         sch->q.qlen -= (*old)->q.qlen;  
1397                         qdisc_reset(*old);
1398                 }
1399                 sch_tree_unlock(sch);
1400                 return 0;
1401         }
1402         return -ENOENT;
1403 }
1404
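/* return the qdisc attached to a leaf class, NULL for inner classes */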
1405 static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
1406 {
1407         struct htb_class *cl = (struct htb_class*)arg;
1408         return (cl && !cl->level) ? cl->un.leaf.q : NULL;
1409 }
1410
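/* look up a class by classid and take a reference on it */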
1411 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1412 {
1413 #ifdef HTB_DEBUG
1414         struct htb_sched *q = qdisc_priv(sch);
1415 #endif
1416         struct htb_class *cl = htb_find(classid,sch);
1417         HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
1418         if (cl) 
1419                 cl->refcnt++;
1420         return (unsigned long)cl;
1421 }
1422
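/* unlink and destroy all filters on the given filter chain */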
1423 static void htb_destroy_filters(struct tcf_proto **fl)
1424 {
1425         struct tcf_proto *tp;
1426
1427         while ((tp = *fl) != NULL) {
1428                 *fl = tp->next;
1429                 tcf_destroy(tp);
1430         }
1431 }
1432
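/* free a class and its whole subtree: destroy the leaf qdisc, release
 * the rate tables and filters, then recurse into the children */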
1433 static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
1434 {
1435         struct htb_sched *q = qdisc_priv(sch);
1436         HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
1437         if (!cl->level) {
1438                 BUG_TRAP(cl->un.leaf.q);
1439                 sch->q.qlen -= cl->un.leaf.q->q.qlen;
1440                 qdisc_destroy(cl->un.leaf.q);
1441         }
1442         qdisc_put_rtab(cl->rate);
1443         qdisc_put_rtab(cl->ceil);
1444         
1445         htb_destroy_filters (&cl->filter_list);
1446         
1447         while (!list_empty(&cl->children)) 
1448                 htb_destroy_class (sch,list_entry(cl->children.next,
1449                                         struct htb_class,sibling));
1450
1451         /* note: this delete may happen twice (see htb_delete) */
1452         list_del(&cl->hlist);
1453         list_del(&cl->sibling);
1454         
1455         if (cl->prio_activity)
1456                 htb_deactivate (q,cl);
1457         
1458         if (cl->cmode != HTB_CAN_SEND)
1459                 htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
1460         
1461         kfree(cl);
1462 }
1463
/* always called under BH & queue lock */
1465 static void htb_destroy(struct Qdisc* sch)
1466 {
1467         struct htb_sched *q = qdisc_priv(sch);
1468         HTB_DBG(0,1,"htb_destroy q=%p\n",q);
1469
1470         del_timer_sync (&q->timer);
1471 #ifdef HTB_RATECM
1472         del_timer_sync (&q->rttim);
1473 #endif
	/* This line used to be after the htb_destroy_class call below,
	   and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to
	   call unbind_filter on them (without an Oops). */
1478         htb_destroy_filters(&q->filter_list);
1479         
1480         while (!list_empty(&q->root)) 
1481                 htb_destroy_class (sch,list_entry(q->root.next,
1482                                         struct htb_class,sibling));
1483
1484         __skb_queue_purge(&q->direct_queue);
1485 }
1486
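/* delete a class; refused with -EBUSY while it still has children or
 * bound filters, and freed only when the last reference is dropped */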
1487 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1488 {
1489         struct htb_sched *q = qdisc_priv(sch);
1490         struct htb_class *cl = (struct htb_class*)arg;
1491         HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1492
	/* TODO: why don't we allow deleting a subtree? references?
	   does the tc subsystem guarantee that in htb_destroy it holds
	   no class refs, so that we can remove children safely there? */
1496         if (!list_empty(&cl->children) || cl->filter_cnt)
1497                 return -EBUSY;
1498         
1499         sch_tree_lock(sch);
1500         
1501         /* delete from hash and active; remainder in destroy_class */
1502         list_del_init(&cl->hlist);
1503         if (cl->prio_activity)
1504                 htb_deactivate (q,cl);
1505
1506         if (--cl->refcnt == 0)
1507                 htb_destroy_class(sch,cl);
1508
1509         sch_tree_unlock(sch);
1510         return 0;
1511 }
1512
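/* release a reference taken by htb_get; frees the class on the last put */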
1513 static void htb_put(struct Qdisc *sch, unsigned long arg)
1514 {
1515 #ifdef HTB_DEBUG
1516         struct htb_sched *q = qdisc_priv(sch);
1517 #endif
1518         struct htb_class *cl = (struct htb_class*)arg;
1519         HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1520
1521         if (--cl->refcnt == 0)
1522                 htb_destroy_class(sch,cl);
1523 }
1524
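/* create a new class or change the parameters of an existing one;
 * the class is passed in and out through *arg */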
1525 static int htb_change_class(struct Qdisc *sch, u32 classid, 
1526                 u32 parentid, struct rtattr **tca, unsigned long *arg)
1527 {
1528         int err = -EINVAL;
1529         struct htb_sched *q = qdisc_priv(sch);
1530         struct htb_class *cl = (struct htb_class*)*arg,*parent;
1531         struct rtattr *opt = tca[TCA_OPTIONS-1];
1532         struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1533         struct rtattr *tb[TCA_HTB_RTAB];
1534         struct tc_htb_opt *hopt;
1535
1536         /* extract all subattrs from opt attr */
1537         if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
1538                         tb[TCA_HTB_PARMS-1] == NULL ||
1539                         RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1540                 goto failure;
1541         
1542         parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
1543
1544         hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1545         HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1546         rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1547         ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
	if (!rtab || !ctab)
		goto failure;
1549
1550         if (!cl) { /* new class */
1551                 struct Qdisc *new_q;
1552                 /* check for valid classid */
1553                 if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
1554                         goto failure;
1555
1556                 /* check maximal depth */
1557                 if (parent && parent->parent && parent->parent->level < 2) {
1558                         printk(KERN_ERR "htb: tree is too deep\n");
1559                         goto failure;
1560                 }
1561                 err = -ENOBUFS;
1562                 if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
1563                         goto failure;
1564                 
1565                 memset(cl, 0, sizeof(*cl));
1566                 cl->refcnt = 1;
1567                 INIT_LIST_HEAD(&cl->sibling);
1568                 INIT_LIST_HEAD(&cl->hlist);
1569                 INIT_LIST_HEAD(&cl->children);
1570                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1571 #ifdef HTB_DEBUG
1572                 cl->magic = HTB_CMAGIC;
1573 #endif
1574
		/* create the leaf qdisc early because it uses
		   kmalloc(GFP_KERNEL), which can't be called while holding
		   sch_tree_lock -- thanks to Karlis Peisenieks */
1578                 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
1579                 sch_tree_lock(sch);
1580                 if (parent && !parent->level) {
1581                         /* turn parent into inner node */
1582                         sch->q.qlen -= parent->un.leaf.q->q.qlen;
1583                         qdisc_destroy (parent->un.leaf.q);
1584                         if (parent->prio_activity) 
1585                                 htb_deactivate (q,parent);
1586
1587                         /* remove from evt list because of level change */
1588                         if (parent->cmode != HTB_CAN_SEND) {
1589                                 htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
1590                                 parent->cmode = HTB_CAN_SEND;
1591                         }
1592                         parent->level = (parent->parent ? parent->parent->level
1593                                         : TC_HTB_MAXDEPTH) - 1;
1594                         memset (&parent->un.inner,0,sizeof(parent->un.inner));
1595                 }
		/* the leaf (i.e. this class) needs an elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->classid = classid;
		cl->parent = parent;
1600
1601                 /* set class to be in HTB_CAN_SEND state */
1602                 cl->tokens = hopt->buffer;
1603                 cl->ctokens = hopt->cbuffer;
1604                 cl->mbuffer = 60000000; /* 1min */
1605                 PSCHED_GET_TIME(cl->t_c);
1606                 cl->cmode = HTB_CAN_SEND;
1607
1608                 /* attach to the hash list and parent's family */
1609                 list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
1610                 list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
#ifdef HTB_DEBUG
		{
			int i;

			for (i = 0; i < TC_HTB_NUMPRIO; i++)
				cl->node[i].rb_color = -1;
			cl->pq_node.rb_color = -1;
		}
#endif
	} else
		sch_tree_lock(sch);
1619
	/* there used to be a nasty bug here: we have to check that the
	   node is really a leaf before changing cl->un.leaf! */
1622         if (!cl->level) {
1623                 cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
1624                 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1625                         printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
1626                         cl->un.leaf.quantum = 1000;
1627                 }
1628                 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1629                         printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
1630                         cl->un.leaf.quantum = 200000;
1631                 }
1632                 if (hopt->quantum)
1633                         cl->un.leaf.quantum = hopt->quantum;
1634                 if ((cl->un.leaf.prio = hopt->prio) >= TC_HTB_NUMPRIO)
1635                         cl->un.leaf.prio = TC_HTB_NUMPRIO - 1;
1636         }
1637
1638         cl->buffer = hopt->buffer;
1639         cl->cbuffer = hopt->cbuffer;
	if (cl->rate)
		qdisc_put_rtab(cl->rate);
	cl->rate = rtab;
	if (cl->ceil)
		qdisc_put_rtab(cl->ceil);
	cl->ceil = ctab;
1642         sch_tree_unlock(sch);
1643
1644         *arg = (unsigned long)cl;
1645         return 0;
1646
1647 failure:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ctab)
		qdisc_put_rtab(ctab);
1650         return err;
1651 }
1652
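/* return the filter chain of a class, or the qdisc-wide chain if arg is 0 */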
1653 static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1654 {
1655         struct htb_sched *q = qdisc_priv(sch);
1656         struct htb_class *cl = (struct htb_class *)arg;
1657         struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1658         HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
1659         return fl;
1660 }
1661
1662 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1663         u32 classid)
1664 {
1665         struct htb_sched *q = qdisc_priv(sch);
1666         struct htb_class *cl = htb_find (classid,sch);
1667         HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
	/* if (cl && !cl->level) return 0;
	   The line above used to be there to prevent attaching filters to
	   leaves. But at least the tc_index filter uses this just to get
	   the class for other reasons, so we have to allow for it.
	   ----
	   19.6.2002 As Werner explained, it is OK - bind_filter is just
	   another way to "lock" the class - unlike "get", this lock can
	   be broken by the class during destroy, IIUC.
	 */
1677         if (cl) 
1678                 cl->filter_cnt++; 
1679         else 
1680                 q->filter_cnt++;
1681         return (unsigned long)cl;
1682 }
1683
1684 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1685 {
1686         struct htb_sched *q = qdisc_priv(sch);
1687         struct htb_class *cl = (struct htb_class *)arg;
1688         HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
1689         if (cl) 
1690                 cl->filter_cnt--; 
1691         else 
1692                 q->filter_cnt--;
1693 }
1694
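/* walk all classes in the hash table, honouring the walker's
 * skip/count/stop protocol */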
1695 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1696 {
1697         struct htb_sched *q = qdisc_priv(sch);
1698         int i;
1699
1700         if (arg->stop)
1701                 return;
1702
1703         for (i = 0; i < HTB_HSIZE; i++) {
1704                 struct list_head *p;
1705                 list_for_each (p,q->hash+i) {
1706                         struct htb_class *cl = list_entry(p,struct htb_class,hlist);
1707                         if (arg->count < arg->skip) {
1708                                 arg->count++;
1709                                 continue;
1710                         }
1711                         if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1712                                 arg->stop = 1;
1713                                 return;
1714                         }
1715                         arg->count++;
1716                 }
1717         }
1718 }
1719
1720 static struct Qdisc_class_ops htb_class_ops = {
1721         .graft          =       htb_graft,
1722         .leaf           =       htb_leaf,
1723         .get            =       htb_get,
1724         .put            =       htb_put,
1725         .change         =       htb_change_class,
1726         .delete         =       htb_delete,
1727         .walk           =       htb_walk,
1728         .tcf_chain      =       htb_find_tcf,
1729         .bind_tcf       =       htb_bind_filter,
1730         .unbind_tcf     =       htb_unbind_filter,
1731         .dump           =       htb_dump_class,
1732         .dump_stats     =       htb_dump_class_stats,
1733 };
1734
1735 static struct Qdisc_ops htb_qdisc_ops = {
1736         .next           =       NULL,
1737         .cl_ops         =       &htb_class_ops,
1738         .id             =       "htb",
1739         .priv_size      =       sizeof(struct htb_sched),
1740         .enqueue        =       htb_enqueue,
1741         .dequeue        =       htb_dequeue,
1742         .requeue        =       htb_requeue,
1743         .drop           =       htb_drop,
1744         .init           =       htb_init,
1745         .reset          =       htb_reset,
1746         .destroy        =       htb_destroy,
1747         .change         =       NULL /* htb_change */,
1748         .dump           =       htb_dump,
1749         .owner          =       THIS_MODULE,
1750 };
1751
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}
1760 module_init(htb_module_init)
1761 module_exit(htb_module_exit)
1762 MODULE_LICENSE("GPL");