net-sched: sch_hfsc: use dynamic class hash helpers
net/sched/sch_hfsc.c
/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   The inverse slope values as well as the y-projection of the 1st
 *   segment are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc
{
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
};

struct hfsc_class
{
        struct Qdisc_class_common cl_common;
        unsigned int    refcnt;         /* usage count */

        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */

        struct hfsc_sched *sched;       /* scheduler data */
        struct hfsc_class *cl_parent;   /* parent class */
        struct list_head siblings;      /* sibling classes */
        struct list_head children;      /* child classes */
        struct Qdisc    *qdisc;         /* leaf qdisc */

        struct rb_node el_node;         /* qdisc's eligible tree member */
        struct rb_root vt_tree;         /* active children sorted by cl_vt */
        struct rb_node vt_node;         /* parent's vt_tree member */
        struct rb_root cf_tree;         /* active children sorted by cl_f */
        struct rb_node cf_node;         /* parent's cf_heap member */
        struct list_head dlist;         /* drop list member */

        u64     cl_total;               /* total work in bytes */
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */

        u64     cl_d;                   /* deadline */
        u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
        u64     cl_myf;                 /* my fit-time (calculated from this
                                           class's own upperlimit curve) */
        u64     cl_myfadj;              /* my fit-time adjustment (to cancel
                                           history dependence) */
        u64     cl_cfmin;               /* earliest children's fit-time (used
                                           with cl_myf to obtain cl_f) */
        u64     cl_cvtmin;              /* minimal virtual time among the
                                           children fit for link-sharing
                                           (monotonic within a period) */
        u64     cl_vtadj;               /* intra-period cumulative vt
                                           adjustment */
        u64     cl_vtoff;               /* inter-period cumulative vt offset */
        u64     cl_cvtmax;              /* max child's vt in the last period */
        u64     cl_cvtoff;              /* cumulative cvtmax of all periods */
        u64     cl_pcvtoff;             /* parent's cvtoff at initialization
                                           time */

        struct internal_sc cl_rsc;      /* internal real-time service curve */
        struct internal_sc cl_fsc;      /* internal fair service curve */
        struct internal_sc cl_usc;      /* internal upperlimit service curve */
        struct runtime_sc cl_deadline;  /* deadline curve */
        struct runtime_sc cl_eligible;  /* eligible curve */
        struct runtime_sc cl_virtual;   /* virtual curve */
        struct runtime_sc cl_ulimit;    /* upperlimit curve */

        unsigned long   cl_flags;       /* which curves are valid */
        unsigned long   cl_vtperiod;    /* vt period sequence number */
        unsigned long   cl_parentperiod;/* parent's vt period sequence number */
        unsigned long   cl_nactive;     /* number of active children */
};
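
/*
 * How the three configurable curves map onto the runtime curves (see
 * hfsc_change_rsc/fsc/usc below): the real-time curve cl_rsc drives
 * cl_deadline and cl_eligible, the fair-share curve cl_fsc drives
 * cl_virtual, and the upperlimit curve cl_usc drives cl_ulimit.
 */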

struct hfsc_sched
{
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct Qdisc_class_hash clhash;         /* class hash */
        struct rb_root eligible;                /* eligible tree */
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
        struct qdisc_watchdog watchdog;         /* watchdog timer */
};

#define HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */


/*
 * the eligible tree holds backlogged classes sorted by their eligible
 * times. there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->sched->eligible.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, el_node);
                if (cl->cl_e >= cl1->cl_e)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->el_node, parent, p);
        rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
        eltree_remove(cl);
        eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
        struct hfsc_class *p, *cl = NULL;
        struct rb_node *n;

        for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, el_node);
                if (p->cl_e > cur_time)
                        break;
                if (cl == NULL || p->cl_d < cl->cl_d)
                        cl = p;
        }
        return cl;
}
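
/*
 * a worked example (illustrative numbers, not from the original source):
 * with classes A (cl_e = 10, cl_d = 50) and B (cl_e = 20, cl_d = 30) and
 * cur_time = 25, both are eligible since the in-order walk only stops at
 * the first cl_e > cur_time; B is picked because its deadline 30 < 50.
 * the tree is keyed by eligible time, so finding the minimum deadline
 * still requires scanning all eligible entries.
 */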

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
        struct rb_node *n;

        n = rb_first(&q->eligible);
        if (n == NULL)
                return NULL;
        return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, vt_node);
                if (cl->cl_vt >= cl1->cl_vt)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->vt_node, parent, p);
        rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
        vttree_remove(cl);
        vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
        struct hfsc_class *p;
        struct rb_node *n;

        for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, vt_node);
                if (p->cl_f <= cur_time)
                        return p;
        }
        return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
        /* if the root class's cfmin is bigger than cur_time, nothing to do */
        if (cl->cl_cfmin > cur_time)
                return NULL;

        while (cl->level > 0) {
                cl = vttree_firstfit(cl, cur_time);
                if (cl == NULL)
                        return NULL;
                /*
                 * update parent's cl_cvtmin.
                 */
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
                        cl->cl_parent->cl_cvtmin = cl->cl_vt;
        }
        return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, cf_node);
                if (cl->cl_f >= cl1->cl_f)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->cf_node, parent, p);
        rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
        cftree_remove(cl);
        cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *      m: bps
 *      d: us
 *  internal service curve parameters
 *      sm: (bytes/psched_us) << SM_SHIFT
 *      ism: (psched_us/byte) << ISM_SHIFT
 *      dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3     1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125     7.8125     0.78125    0.078125   0.0078125
 */
#define SM_SHIFT        20
#define ISM_SHIFT       18

#define SM_MASK         ((1ULL << SM_SHIFT) - 1)
#define ISM_MASK        ((1ULL << ISM_SHIFT) - 1)
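
/*
 * a worked scaling example (illustrative, consistent with the table
 * above but not part of the original source): at 10Mbit/s a class is
 * served 1.28 bytes per 1.024us tick, so
 *
 *      sm  = 1.28 << SM_SHIFT     ~= 1342177   (scaled bytes/tick)
 *      ism = 0.78125 << ISM_SHIFT  = 204800    (scaled ticks/byte)
 *
 * keeping the fractional slopes as scaled integers preserves the
 * required effective digits while seg_x2y()/seg_y2x() below stay in
 * pure 64-bit arithmetic.
 */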

static inline u64
seg_x2y(u64 x, u64 sm)
{
        u64 y;

        /*
         * compute
         *      y = x * sm >> SM_SHIFT
         * but divide it for the upper and lower bits to avoid overflow
         */
        y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
        return y;
}
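
/*
 * why the split works (a sketch, not from the original source):
 * writing x = (x_hi << SM_SHIFT) + x_lo with x_lo = x & SM_MASK gives
 *
 *      (x * sm) >> SM_SHIFT == x_hi * sm + ((x_lo * sm) >> SM_SHIFT)
 *
 * with truncation only in the low term, exactly as coded above.
 * x_lo * sm always fits in 64 bits because x_lo < 2^20 and realistic
 * scaled slopes are far smaller than 2^44, while the naive product
 * x * sm could overflow for large clock counts.
 */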

static inline u64
seg_y2x(u64 y, u64 ism)
{
        u64 x;

        if (y == 0)
                x = 0;
        else if (ism == HT_INFINITY)
                x = HT_INFINITY;
        else {
                x = (y >> ISM_SHIFT) * ism
                    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
        }
        return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
        u64 sm;

        sm = ((u64)m << SM_SHIFT);
        sm += PSCHED_TICKS_PER_SEC - 1;
        do_div(sm, PSCHED_TICKS_PER_SEC);
        return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
        u64 ism;

        if (m == 0)
                ism = HT_INFINITY;
        else {
                ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
                ism += m - 1;
                do_div(ism, m);
        }
        return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
        u64 dx;

        dx = ((u64)d * PSCHED_TICKS_PER_SEC);
        dx += USEC_PER_SEC - 1;
        do_div(dx, USEC_PER_SEC);
        return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
        u64 m;

        m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
        return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
        u64 d;

        d = dx * USEC_PER_SEC;
        do_div(d, PSCHED_TICKS_PER_SEC);
        return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
        isc->sm1  = m2sm(sc->m1);
        isc->ism1 = m2ism(sc->m1);
        isc->dx   = d2dx(sc->d);
        isc->dy   = seg_x2y(isc->dx, isc->sm1);
        isc->sm2  = m2sm(sc->m2);
        isc->ism2 = m2ism(sc->m2);
}
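
#if 0
/*
 * illustrative sketch (example only, compiled out): converting a
 * two-segment curve "m1 for d microseconds, then m2" to internal form.
 * the parameters are arbitrary sample values; rates are in bytes/sec,
 * the "bps" of the comments above.
 */
static void example_sc2isc(void)
{
        struct tc_service_curve sc = {
                .m1 = 125000,   /* 1Mbit/s burst slope */
                .d  = 10000,    /* first segment lasts 10ms */
                .m2 = 62500,    /* 500Kbit/s steady-state slope */
        };
        struct internal_sc isc;

        sc2isc(&sc, &isc);
        /* isc.dy now holds the bytes served during the 10ms burst */
}
#endif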

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        rtsc->x    = x;
        rtsc->y    = y;
        rtsc->sm1  = isc->sm1;
        rtsc->ism1 = isc->ism1;
        rtsc->dx   = isc->dx;
        rtsc->dy   = isc->dy;
        rtsc->sm2  = isc->sm2;
        rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
        u64 x;

        if (y < rtsc->y)
                x = rtsc->x;
        else if (y <= rtsc->y + rtsc->dy) {
                /* x belongs to the 1st segment */
                if (rtsc->dy == 0)
                        x = rtsc->x + rtsc->dx;
                else
                        x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
        } else {
                /* x belongs to the 2nd segment */
                x = rtsc->x + rtsc->dx
                    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
        }
        return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
        u64 y;

        if (x <= rtsc->x)
                y = rtsc->y;
        else if (x <= rtsc->x + rtsc->dx)
                /* y belongs to the 1st segment */
                y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
        else
                /* y belongs to the 2nd segment */
                y = rtsc->y + rtsc->dy
                    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
        return y;
}
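
/*
 * a worked evaluation (illustrative numbers, slopes shown unscaled for
 * readability; not from the original source): for a curve anchored at
 * (x, y) = (0, 0) with a first segment of dx = 100 ticks, dy = 200
 * bytes (sm1 = 2 bytes/tick) and sm2 = 1 byte/tick afterwards,
 *
 *      rtsc_x2y(rtsc, 150) == 200 + (150 - 100) * 1 == 250
 *      rtsc_y2x(rtsc, 250) == 100 + (250 - 200) / 1 == 150
 *
 * the two functions are inverses of each other on the same curve,
 * modulo the rounding inside seg_x2y()/seg_y2x().
 */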

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        u64 y1, y2, dx, dy;
        u32 dsm;

        if (isc->sm1 <= isc->sm2) {
                /* service curve is convex */
                y1 = rtsc_x2y(rtsc, x);
                if (y1 < y)
                        /* the current rtsc is smaller */
                        return;
                rtsc->x = x;
                rtsc->y = y;
                return;
        }

        /*
         * service curve is concave
         * compute the two y values of the current rtsc
         *      y1: at x
         *      y2: at (x + dx)
         */
        y1 = rtsc_x2y(rtsc, x);
        if (y1 <= y) {
                /* rtsc is below isc, no change to rtsc */
                return;
        }

        y2 = rtsc_x2y(rtsc, x + isc->dx);
        if (y2 >= y + isc->dy) {
                /* rtsc is above isc, replace rtsc by isc */
                rtsc->x = x;
                rtsc->y = y;
                rtsc->dx = isc->dx;
                rtsc->dy = isc->dy;
                return;
        }

        /*
         * the two curves intersect
         * compute the offsets (dx, dy) using the reverse
         * function of seg_x2y()
         *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
         * i.e. dx * (sm1 - sm2) >> SM_SHIFT == y1 - y, hence
         *      dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
         */
        dx = (y1 - y) << SM_SHIFT;
        dsm = isc->sm1 - isc->sm2;
        do_div(dx, dsm);
        /*
         * check if (x, y1) belongs to the 1st segment of rtsc.
         * if so, add the offset.
         */
        if (rtsc->x + rtsc->dx > x)
                dx += rtsc->x + rtsc->dx - x;
        dy = seg_x2y(dx, isc->sm1);

        rtsc->x = x;
        rtsc->y = y;
        rtsc->dx = dx;
        rtsc->dy = dy;
        return;
}
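
/*
 * numeric check of the intersection formula (illustrative, slopes
 * shown unscaled): with y1 - y = 100 bytes, sm1 = 3 and sm2 = 1
 * bytes/tick, dx = 100 / (3 - 1) = 50 ticks; after 50 ticks the
 * steeper first segment of isc has closed the 100-byte gap, since
 * 50 * 3 == 50 * 1 + 100.
 */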

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
        u64 cur_time = psched_get_time();

        /* update the deadline curve */
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

        /*
         * update the eligible curve.
         * for concave, it is equal to the deadline curve.
         * for convex, it is a linear curve with slope m2.
         */
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }

        /* compute e and d */
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
        struct rb_node *n = rb_first(&cl->cf_tree);
        struct hfsc_class *p;

        if (n == NULL) {
                cl->cl_cfmin = 0;
                return;
        }
        p = rb_entry(n, struct hfsc_class, cf_node);
        cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
        struct hfsc_class *max_cl;
        struct rb_node *n;
        u64 vt, f, cur_time;
        int go_active;

        cur_time = 0;
        go_active = 1;
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                if (go_active && cl->cl_nactive++ == 0)
                        go_active = 1;
                else
                        go_active = 0;

                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
                                max_cl = rb_entry(n, struct hfsc_class, vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes.  if the parent's period didn't
                                 * change, don't decrease vt of the class.
                                 */
                                vt = max_cl->cl_vt;
                                if (cl->cl_parent->cl_cvtmin != 0)
                                        vt = (cl->cl_parent->cl_cvtmin + vt)/2;

                                if (cl->cl_parent->cl_vtperiod !=
                                    cl->cl_parentperiod || vt > cl->cl_vt)
                                        cl->cl_vt = vt;
                        } else {
                                /*
                                 * first child for a new parent backlog period.
                                 * add parent's cvtmax to cvtoff to make a new
                                 * vt (vtoff + vt) larger than the vt in the
                                 * last period for all children.
                                 */
                                vt = cl->cl_parent->cl_cvtmax;
                                cl->cl_parent->cl_cvtoff += vt;
                                cl->cl_parent->cl_cvtmax = 0;
                                cl->cl_parent->cl_cvtmin = 0;
                                cl->cl_vt = 0;
                        }

                        cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
                                                        cl->cl_pcvtoff;

                        /* update the virtual curve */
                        vt = cl->cl_vt + cl->cl_vtoff;
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
                                                      cl->cl_total);
                        if (cl->cl_virtual.x == vt) {
                                cl->cl_virtual.x -= cl->cl_vtoff;
                                cl->cl_vtoff = 0;
                        }
                        cl->cl_vtadj = 0;

                        cl->cl_vtperiod++;  /* increment vt period */
                        cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
                        if (cl->cl_parent->cl_nactive == 0)
                                cl->cl_parentperiod++;
                        cl->cl_f = 0;

                        vttree_insert(cl);
                        cftree_insert(cl);

                        if (cl->cl_flags & HFSC_USC) {
                                /* class has upper limit curve */
                                if (cur_time == 0)
                                        cur_time = psched_get_time();

                                /* update the ulimit curve */
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
                                         cl->cl_total);
                                /* compute myf */
                                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                                                      cl->cl_total);
                                cl->cl_myfadj = 0;
                        }
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
        u64 f; /* , myf_bound, delta; */
        int go_passive = 0;

        if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
                go_passive = 1;

        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                cl->cl_total += len;

                if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
                        continue;

                if (go_passive && --cl->cl_nactive == 0)
                        go_passive = 1;
                else
                        go_passive = 0;

                if (go_passive) {
                        /* no more active child, going passive */

                        /* update cvtmax of the parent class */
                        if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
                                cl->cl_parent->cl_cvtmax = cl->cl_vt;

                        /* remove this class from the vt tree */
                        vttree_remove(cl);

                        cftree_remove(cl);
                        update_cfmin(cl->cl_parent);

                        continue;
                }

                /*
                 * update vt and f
                 */
                cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
                            - cl->cl_vtoff + cl->cl_vtadj;

                /*
                 * if vt of the class is smaller than cvtmin,
                 * the class was skipped in the past due to non-fit.
                 * if so, we need to adjust vtadj.
                 */
                if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
                        cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
                        cl->cl_vt = cl->cl_parent->cl_cvtmin;
                }

                /* update the vt tree */
                vttree_update(cl);

                if (cl->cl_flags & HFSC_USC) {
                        cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
                                                              cl->cl_total);
#if 0
                        /*
                         * This code causes classes to stay way under their
                         * limit when multiple classes are used at gigabit
                         * speed. needs investigation. -kaber
                         */
                        /*
                         * if myf lags behind by more than one clock tick
                         * from the current time, adjust myfadj to prevent
                         * a rate-limited class from going greedy.
                         * in a steady state under rate-limiting, myf
                         * fluctuates within one clock tick.
                         */
                        myf_bound = cur_time - PSCHED_JIFFIE2US(1);
                        if (cl->cl_myf < myf_bound) {
                                delta = cur_time - cl->cl_myf;
                                cl->cl_myfadj += delta;
                                cl->cl_myf += delta;
                        }
#endif
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
        if (cl->cl_flags & HFSC_RSC)
                init_ed(cl, len);
        if (cl->cl_flags & HFSC_FSC)
                init_vf(cl, len);

        list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
        if (cl->cl_flags & HFSC_RSC)
                eltree_remove(cl);

        list_del(&cl->dlist);

        /*
         * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
         * needs to be called explicitly to remove a class from vttree.
         */
}

/*
 * hack to get length of first packet in queue. the inner qdisc has no
 * peek operation, so the head packet is dequeued and requeued at once.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
        struct sk_buff *skb;
        unsigned int len;

        skb = sch->dequeue(sch);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
                return 0;
        }
        len = skb->len;
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: failed to requeue\n");
                qdisc_tree_decrease_qlen(sch, 1);
                return 0;
        }
        return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
        struct hfsc_class *p;
        unsigned int level;

        do {
                level = 0;
                list_for_each_entry(p, &cl->children, siblings) {
                        if (p->level >= level)
                                level = p->level + 1;
                }
                cl->level = level;
        } while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, classid);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
        sc2isc(rsc, &cl->cl_rsc);
        rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }
        cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
        sc2isc(fsc, &cl->cl_fsc);
        rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
        cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
        sc2isc(usc, &cl->cl_usc);
        rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
        cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
        [TCA_HFSC_RSC]  = { .len = sizeof(struct tc_service_curve) },
        [TCA_HFSC_FSC]  = { .len = sizeof(struct tc_service_curve) },
        [TCA_HFSC_USC]  = { .len = sizeof(struct tc_service_curve) },
};
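
/*
 * from userspace these attributes are typically set with iproute2's tc
 * (a usage sketch; exact option spelling depends on the tc version):
 *
 *      tc qdisc add dev eth0 root handle 1: hfsc default 10
 *      tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *              sc m1 0 d 10ms m2 1mbit ul m2 10mbit
 *
 * "sc" fills both TCA_HFSC_RSC and TCA_HFSC_FSC with the same curve,
 * "rt"/"ls" set them individually, and "ul" fills TCA_HFSC_USC.
 */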

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct nlattr **tca, unsigned long *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)*arg;
        struct hfsc_class *parent = NULL;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_HFSC_MAX + 1];
        struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
        u64 cur_time;
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy);
        if (err < 0)
                return err;

        if (tb[TCA_HFSC_RSC]) {
                rsc = nla_data(tb[TCA_HFSC_RSC]);
                if (rsc->m1 == 0 && rsc->m2 == 0)
                        rsc = NULL;
        }

        if (tb[TCA_HFSC_FSC]) {
                fsc = nla_data(tb[TCA_HFSC_FSC]);
                if (fsc->m1 == 0 && fsc->m2 == 0)
                        fsc = NULL;
        }

        if (tb[TCA_HFSC_USC]) {
                usc = nla_data(tb[TCA_HFSC_USC]);
                if (usc->m1 == 0 && usc->m2 == 0)
                        usc = NULL;
        }

        if (cl != NULL) {
                if (parentid) {
                        if (cl->cl_parent &&
                            cl->cl_parent->cl_common.classid != parentid)
                                return -EINVAL;
                        if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
                                return -EINVAL;
                }
                cur_time = psched_get_time();

                sch_tree_lock(sch);
                if (rsc != NULL)
                        hfsc_change_rsc(cl, rsc, cur_time);
                if (fsc != NULL)
                        hfsc_change_fsc(cl, fsc);
                if (usc != NULL)
                        hfsc_change_usc(cl, usc, cur_time);

                if (cl->qdisc->q.qlen != 0) {
                        if (cl->cl_flags & HFSC_RSC)
                                update_ed(cl, qdisc_peek_len(cl->qdisc));
                        if (cl->cl_flags & HFSC_FSC)
                                update_vf(cl, 0, cur_time);
                }
                sch_tree_unlock(sch);

                if (tca[TCA_RATE])
                        gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                              &sch->dev->queue_lock,
                                              tca[TCA_RATE]);
                return 0;
        }

        if (parentid == TC_H_ROOT)
                return -EEXIST;

        parent = &q->root;
        if (parentid) {
                parent = hfsc_find_class(parentid, sch);
                if (parent == NULL)
                        return -ENOENT;
        }

        if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
                return -EINVAL;
        if (hfsc_find_class(classid, sch))
                return -EEXIST;

        if (rsc == NULL && fsc == NULL)
                return -EINVAL;

        cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
        if (cl == NULL)
                return -ENOBUFS;

        if (rsc != NULL)
                hfsc_change_rsc(cl, rsc, 0);
        if (fsc != NULL)
                hfsc_change_fsc(cl, fsc);
        if (usc != NULL)
                hfsc_change_usc(cl, usc, 0);

        cl->cl_common.classid = classid;
        cl->refcnt    = 1;
        cl->sched     = q;
        cl->cl_parent = parent;
        cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
        if (cl->qdisc == NULL)
                cl->qdisc = &noop_qdisc;
        INIT_LIST_HEAD(&cl->children);
        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;

        sch_tree_lock(sch);
        qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
                hfsc_purge_queue(sch, parent);
        hfsc_adjust_levels(parent);
        cl->cl_pcvtoff = parent->cl_cvtoff;
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        if (tca[TCA_RATE])
                gen_new_estimator(&cl->bstats, &cl->rate_est,
                                  &sch->dev->queue_lock, tca[TCA_RATE]);
        *arg = (unsigned long)cl;
        return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        tcf_destroy_chain(&cl->filter_list);
        qdisc_destroy(cl->qdisc);
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        if (cl != &q->root)
                kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
                return -EBUSY;

        sch_tree_lock(sch);

        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);

        hfsc_purge_queue(sch, cl);
        qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
            (cl = hfsc_find_class(skb->priority, sch)) != NULL)
                if (cl->level == 0)
                        return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->root.filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                if ((cl = (struct hfsc_class *)res.class) == NULL) {
                        if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
                                break; /* filter selected invalid classid */
                }

                if (cl->level == 0)
                        return cl; /* hit leaf class */

                /* apply inner filter chain */
                tcf = cl->filter_list;
        }

        /* classification failed, try default class */
        cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (cl == NULL || cl->level > 0)
                return NULL;

        return cl;
}
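
/*
 * classification order: a leaf class whose classid equals
 * skb->priority is used directly; otherwise the filter chains are
 * walked from the root, descending through inner classes until a leaf
 * is selected; otherwise the default class (q->defcls) is tried, and
 * the packet is dropped in hfsc_enqueue() if it is absent or non-leaf.
 */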

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                return -ENOENT;
        if (cl->level > 0)
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                        cl->cl_common.classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        hfsc_purge_queue(sch, cl);
        *old = xchg(&cl->qdisc, new);
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl != NULL && cl->level == 0)
                return cl->qdisc;

        return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->qdisc->q.qlen == 0) {
                update_vf(cl, 0, 0);
                set_passive(cl);
        }
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
        struct hfsc_class *p = (struct hfsc_class *)parent;
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL) {
                if (p != NULL && p->level <= cl->level)
                        return 0;
                cl->filter_cnt++;
        }

        return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                cl = &q->root;

        return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
        struct tc_service_curve tsc;

        tsc.m1 = sm2m(sc->sm1);
        tsc.d  = dx2d(sc->dx);
        tsc.m2 = sm2m(sc->sm2);
        NLA_PUT(skb, attr, sizeof(tsc), &tsc);

        return skb->len;

 nla_put_failure:
        return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
        if ((cl->cl_flags & HFSC_RSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
                goto nla_put_failure;

        if ((cl->cl_flags & HFSC_FSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
                goto nla_put_failure;

        if ((cl->cl_flags & HFSC_USC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
                goto nla_put_failure;

        return skb->len;

 nla_put_failure:
        return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct nlattr *nest;

        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
                                          TC_H_ROOT;
        tcm->tcm_handle = cl->cl_common.classid;
        if (cl->level == 0)
                tcm->tcm_info = cl->qdisc->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (hfsc_dump_curves(skb, cl) < 0)
                goto nla_put_failure;
        nla_nest_end(skb, nest);
        return skb->len;

 nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct gnet_dump *d)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;

        cl->qstats.qlen = cl->qdisc->q.qlen;
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
        xstats.rtwork  = cl->cl_cumul;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hlist_node *n;
        struct hfsc_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i],
                                     cl_common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        u64 next_time = 0;

        if ((cl = eltree_get_minel(q)) != NULL)
                next_time = cl->cl_e;
        if (q->root.cl_cfmin != 0) {
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
        WARN_ON(next_time == 0);
        qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;
        int err;

        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);

        q->defcls = qopt->defcls;
        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        skb_queue_head_init(&q->requeue);

        q->root.cl_common.classid = sch->handle;
        q->root.refcnt  = 1;
        q->root.sched   = q;
        q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                          sch->handle);
        if (q->root.qdisc == NULL)
                q->root.qdisc = &noop_qdisc;
        INIT_LIST_HEAD(&q->root.children);
        q->root.vt_tree = RB_ROOT;
        q->root.cf_tree = RB_ROOT;

        qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
        qdisc_class_hash_grow(sch, &q->clhash);

        qdisc_watchdog_init(&q->watchdog, sch);

        return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct tc_hfsc_qopt *qopt;

        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);

        sch_tree_lock(sch);
        q->defcls = qopt->defcls;
        sch_tree_unlock(sch);

        return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
        cl->cl_total        = 0;
        cl->cl_cumul        = 0;
        cl->cl_d            = 0;
        cl->cl_e            = 0;
        cl->cl_vt           = 0;
        cl->cl_vtadj        = 0;
        cl->cl_vtoff        = 0;
        cl->cl_cvtmin       = 0;
        cl->cl_cvtmax       = 0;
        cl->cl_cvtoff       = 0;
        cl->cl_pcvtoff      = 0;
        cl->cl_vtperiod     = 0;
        cl->cl_parentperiod = 0;
        cl->cl_f            = 0;
        cl->cl_myf          = 0;
        cl->cl_myfadj       = 0;
        cl->cl_cfmin        = 0;
        cl->cl_nactive      = 0;

        cl->vt_tree = RB_ROOT;
        cl->cf_tree = RB_ROOT;
        qdisc_reset(cl->qdisc);

        if (cl->cl_flags & HFSC_RSC)
                rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
        if (cl->cl_flags & HFSC_FSC)
                rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
        if (cl->cl_flags & HFSC_USC)
                rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct hlist_node *n;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
                        hfsc_reset_class(cl);
        }
        __skb_queue_purge(&q->requeue);
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        qdisc_watchdog_cancel(&q->watchdog);
        sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hlist_node *n, *next;
        struct hfsc_class *cl;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
                        tcf_destroy_chain(&cl->filter_list);
        }
        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
                                          cl_common.hnode)
                        hfsc_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
        __skb_queue_purge(&q->requeue);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;

        qopt.defcls = q->defcls;
        NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
        return skb->len;

 nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_class *cl;
        unsigned int len;
        int err;

        cl = hfsc_classify(skb, sch, &err);
        if (cl == NULL) {
                if (err == NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return err;
        }

        len = skb->len;
        err = cl->qdisc->enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                cl->qstats.drops++;
                sch->qstats.drops++;
                return err;
        }

        if (cl->qdisc->q.qlen == 1)
                set_active(cl, len);

        cl->bstats.packets++;
        cl->bstats.bytes += len;
        sch->bstats.packets++;
        sch->bstats.bytes += len;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct sk_buff *skb;
        u64 cur_time;
        unsigned int next_len;
        int realtime = 0;

        if (sch->q.qlen == 0)
                return NULL;
        if ((skb = __skb_dequeue(&q->requeue)))
                goto out;

        cur_time = psched_get_time();

        /*
         * if there are eligible classes, use real-time criteria.
         * find the class with the minimum deadline among
         * the eligible classes.
         */
        if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
                realtime = 1;
        } else {
                /*
                 * use link-sharing criteria
                 * get the class with the minimum vt in the hierarchy
                 */
                cl = vttree_get_minvt(&q->root, cur_time);
                if (cl == NULL) {
                        sch->qstats.overlimits++;
                        hfsc_schedule_watchdog(sch);
                        return NULL;
                }
        }

        skb = cl->qdisc->dequeue(cl->qdisc);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("HFSC: Non-work-conserving qdisc ?\n");
                return NULL;
        }

        update_vf(cl, skb->len, cur_time);
        if (realtime)
                cl->cl_cumul += skb->len;

        if (cl->qdisc->q.qlen != 0) {
                if (cl->cl_flags & HFSC_RSC) {
                        /* update ed */
                        next_len = qdisc_peek_len(cl->qdisc);
                        if (realtime)
                                update_ed(cl, next_len);
                        else
                                update_d(cl, next_len);
                }
        } else {
                /* the class becomes passive */
                set_passive(cl);
        }

 out:
        sch->flags &= ~TCQ_F_THROTTLED;
        sch->q.qlen--;

        return skb;
}

static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        __skb_queue_head(&q->requeue, skb);
        sch->q.qlen++;
        sch->qstats.requeues++;
        return NET_XMIT_SUCCESS;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int len;

        list_for_each_entry(cl, &q->droplist, dlist) {
                if (cl->qdisc->ops->drop != NULL &&
                    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
                        if (cl->qdisc->q.qlen == 0) {
                                update_vf(cl, 0, 0);
                                set_passive(cl);
                        } else {
                                list_move_tail(&cl->dlist, &q->droplist);
                        }
                        cl->qstats.drops++;
                        sch->qstats.drops++;
                        sch->q.qlen--;
                        return len;
                }
        }
        return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
        .change         = hfsc_change_class,
        .delete         = hfsc_delete_class,
        .graft          = hfsc_graft_class,
        .leaf           = hfsc_class_leaf,
        .qlen_notify    = hfsc_qlen_notify,
        .get            = hfsc_get_class,
        .put            = hfsc_put_class,
        .bind_tcf       = hfsc_bind_tcf,
        .unbind_tcf     = hfsc_unbind_tcf,
        .tcf_chain      = hfsc_tcf_chain,
        .dump           = hfsc_dump_class,
        .dump_stats     = hfsc_dump_class_stats,
        .walk           = hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
        .id             = "hfsc",
        .init           = hfsc_init_qdisc,
        .change         = hfsc_change_qdisc,
        .reset          = hfsc_reset_qdisc,
        .destroy        = hfsc_destroy_qdisc,
        .dump           = hfsc_dump_qdisc,
        .enqueue        = hfsc_enqueue,
        .dequeue        = hfsc_dequeue,
        .requeue        = hfsc_requeue,
        .drop           = hfsc_drop,
        .cl_ops         = &hfsc_class_ops,
        .priv_size      = sizeof(struct hfsc_sched),
        .owner          = THIS_MODULE
};

static int __init
hfsc_init(void)
{
        return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
        unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);