/*
 * net/sched/sch_red.c  Random Early Detection queue.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914: computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816: ECN support
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

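/*
 * Illustrative sketch, not part of the original file: from userspace these
 * parameters are normally supplied through the tc utility, roughly along
 * the lines of
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 50 probability 0.02 bandwidth 10Mbit ecn
 *
 * The device name and byte values above are arbitrary examples; tc turns
 * them into the tc_red_qopt structure and STAB lookup table that
 * red_change() below receives via netlink.
 */
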
struct red_sched_data
{
        u32                     limit;          /* HARD maximal queue length */
        unsigned char           flags;
        struct red_parms        parms;
        struct red_stats        stats;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
        return q->flags & TC_RED_ECN;
}

static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
        struct red_sched_data *q = qdisc_priv(sch);

        q->parms.qavg = red_calc_qavg(&q->parms, sch->qstats.backlog);

        if (red_is_idling(&q->parms))
                red_end_of_idle_period(&q->parms);

        switch (red_action(&q->parms, q->parms.qavg)) {
                case RED_DONT_MARK:
                        break;

                case RED_PROB_MARK:
                        sch->qstats.overlimits++;
                        if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
                                q->stats.prob_drop++;
                                goto congestion_drop;
                        }

                        q->stats.prob_mark++;
                        break;

                case RED_HARD_MARK:
                        sch->qstats.overlimits++;
                        if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
                                q->stats.forced_drop++;
                                goto congestion_drop;
                        }

                        q->stats.forced_mark++;
                        break;
        }

        if (sch->qstats.backlog + skb->len <= q->limit)
                return qdisc_enqueue_tail(skb, sch);

        q->stats.pdrop++;
        return qdisc_drop(skb, sch);

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;
}

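/*
 * Sketch, not part of the original file: red_calc_qavg() above (a helper
 * from <net/red.h>) maintains an exponentially weighted moving average of
 * the backlog. Assuming qavg is kept scaled by 2^Wlog, its non-idle update
 * is roughly equivalent to the function below.
 */
#if 0   /* illustrative only */
static unsigned long red_ewma_sketch(unsigned long qavg_scaled,
                                     unsigned long backlog, int Wlog)
{
        /* avg <- (1 - 2^-Wlog) * avg + 2^-Wlog * backlog, kept << Wlog */
        return qavg_scaled + (backlog - (qavg_scaled >> Wlog));
}
#endif
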
static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
        struct red_sched_data *q = qdisc_priv(sch);

        if (red_is_idling(&q->parms))
                red_end_of_idle_period(&q->parms);

        return qdisc_requeue(skb, sch);
}

static struct sk_buff * red_dequeue(struct Qdisc* sch)
{
        struct sk_buff *skb;
        struct red_sched_data *q = qdisc_priv(sch);

        skb = qdisc_dequeue_head(sch);

        if (skb == NULL && !red_is_idling(&q->parms))
                red_start_of_idle_period(&q->parms);

        return skb;
}

static unsigned int red_drop(struct Qdisc* sch)
{
        struct sk_buff *skb;
        struct red_sched_data *q = qdisc_priv(sch);

        skb = qdisc_dequeue_tail(sch);
        if (skb) {
                unsigned int len = skb->len;
                q->stats.other++;
                qdisc_drop(skb, sch);
                return len;
        }

        if (!red_is_idling(&q->parms))
                red_start_of_idle_period(&q->parms);

        return 0;
}

static void red_reset(struct Qdisc* sch)
{
        struct red_sched_data *q = qdisc_priv(sch);

        qdisc_reset_queue(sch);
        red_restart(&q->parms);
}

static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct rtattr *tb[TCA_RED_MAX];
        struct tc_red_qopt *ctl;

        if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
                return -EINVAL;

        if (tb[TCA_RED_PARMS-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
            tb[TCA_RED_STAB-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
                return -EINVAL;

        ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

        sch_tree_lock(sch);
        q->flags = ctl->flags;
        q->limit = ctl->limit;

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      RTA_DATA(tb[TCA_RED_STAB-1]));

        if (skb_queue_empty(&sch->q))
                red_end_of_idle_period(&q->parms);

        sch_tree_unlock(sch);
        return 0;
}

static int red_init(struct Qdisc* sch, struct rtattr *opt)
{
        return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct rtattr *opts = NULL;
        struct tc_red_qopt opt = {
                .limit          = q->limit,
                .flags          = q->flags,
                /* thresholds are stored scaled by 2^Wlog, shift back for userspace */
                .qth_min        = q->parms.qth_min >> q->parms.Wlog,
                .qth_max        = q->parms.qth_max >> q->parms.Wlog,
                .Wlog           = q->parms.Wlog,
                .Plog           = q->parms.Plog,
                .Scell_log      = q->parms.Scell_log,
        };

        opts = RTA_NEST(skb, TCA_OPTIONS);
        RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
        return RTA_NEST_END(skb, opts);

rtattr_failure:
        return RTA_NEST_CANCEL(skb, opts);
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct tc_red_xstats st = {
                .early  = q->stats.prob_drop + q->stats.forced_drop,
                .pdrop  = q->stats.pdrop,
                .other  = q->stats.other,
                .marked = q->stats.prob_mark + q->stats.forced_mark,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops red_qdisc_ops = {
        .id             = "red",
        .priv_size      = sizeof(struct red_sched_data),
        .enqueue        = red_enqueue,
        .dequeue        = red_dequeue,
        .requeue        = red_requeue,
        .drop           = red_drop,
        .init           = red_init,
        .reset          = red_reset,
        .change         = red_change,
        .dump           = red_dump,
        .dump_stats     = red_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init red_module_init(void)
{
        return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
        unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");