/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive
	traffic into the higher priority band.

	We still need true WFQ for the top-level CSZ, but using WFQ
	for best-effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128;
	the maximal mtu to 2^15-1; and the number of hash buckets to 1024.
	The only goal of these restrictions was that all the data
	fit into one 4K page :-). Struct sfq_sched_data is
	organized in an anti-cache manner: all the data for a bucket
	are scattered over different locations. This is not good,
	but it allowed me to put it into 4K.

	It is easy to increase these values, but not in flight.  */
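/* Usage note (editorial addition, not part of the original file):
 * SFQ is normally attached from user space with tc, for example as a
 * leaf qdisc with the hash re-keyed every 10 seconds:
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * "perturb 10" maps onto q->perturb_period and sfq_perturbation()
 * below; the quantum defaults to the device MTU and the limit is
 * capped at SFQ_DEPTH - 1 packets (see sfq_change()).
 */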
#define SFQ_DEPTH		128
#define SFQ_HASH_DIVISOR	1024

/* This type must be able to hold at least SFQ_DEPTH*2 values */
typedef unsigned char sfq_index;
struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	tail;		/* Index of current slot in round */
	sfq_index	max_depth;	/* Maximal depth */

	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	sfq_index	next[SFQ_DEPTH];	/* Active slots link */
	short		allot[SFQ_DEPTH];	/* Current allotment per slot */
	unsigned short	hash[SFQ_DEPTH];	/* Hash value indexed by slots */
	struct sk_buff_head	qs[SFQ_DEPTH];	/* Slot queue */
	struct sfq_head	dep[SFQ_DEPTH*2];	/* Linked list of slots, indexed by depth */
};
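/* Editorial note (not in the original source): entries 0..SFQ_DEPTH-1 of
 * dep[] carry the per-slot links, while dep[SFQ_DEPTH + d] is the head of
 * a doubly linked list of all slots whose queue currently holds exactly
 * d packets.  sfq_inc()/sfq_dec() move a slot between these lists as its
 * length changes, so sfq_drop() can find a longest slot in O(1) via
 * dep[q->max_depth + SFQ_DEPTH].next.
 */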
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
	{
		const struct iphdr *iph = ip_hdr(skb);
		h = iph->daddr;
		h2 = iph->saddr ^ iph->protocol;
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    (iph->protocol == IPPROTO_TCP ||
		     iph->protocol == IPPROTO_UDP ||
		     iph->protocol == IPPROTO_UDPLITE ||
		     iph->protocol == IPPROTO_SCTP ||
		     iph->protocol == IPPROTO_DCCP ||
		     iph->protocol == IPPROTO_ESP))
			h2 ^= *(((u32*)iph) + iph->ihl);
		break;
	}
	case __constant_htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph = ipv6_hdr(skb);
		h = iph->daddr.s6_addr32[3];
		h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		if (iph->nexthdr == IPPROTO_TCP ||
		    iph->nexthdr == IPPROTO_UDP ||
		    iph->nexthdr == IPPROTO_UDPLITE ||
		    iph->nexthdr == IPPROTO_SCTP ||
		    iph->nexthdr == IPPROTO_DCCP ||
		    iph->nexthdr == IPPROTO_ESP)
			h2 ^= *(u32*)&iph[1];
		break;
	}
	default:
		h = (unsigned long)skb->dst ^ skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}
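/* Editorial note (not in the original source): for an unfragmented TCP or
 * UDP over IPv4 packet the flow key above is effectively (daddr,
 * saddr ^ protocol ^ ports), since the first 32 bits after the IP header
 * hold the source and destination ports.  The two words are folded by
 * jhash_2words() together with q->perturbation and masked down to one of
 * SFQ_HASH_DIVISOR buckets.
 */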
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
	int d = q->qs[x].qlen + SFQ_DEPTH;
	q->dep[p].next = q->dep[n].prev = x;

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
	if (n == p && q->max_depth == q->qs[x].qlen + 1)

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
	if (q->max_depth < d)
static unsigned int sfq_drop(struct Qdisc *sch)
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index d = q->max_depth;

	/* Queue is full! Find the longest slot and
	   drop a packet from it */

		sfq_index x = q->dep[d + SFQ_DEPTH].next;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.backlog -= len;

		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		d = q->next[q->tail];
		q->next[q->tail] = q->next[d];
		q->allot[q->next[d]] += q->quantum;
		__skb_unlink(skb, &q->qs[d]);
		q->ht[q->hash[d]] = SFQ_DEPTH;
		sch->qstats.backlog -= len;
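/* Editorial note (not in the original source): sfq_drop() has two cases.
 * If the deepest slot holds more than one packet, the tail packet of that
 * slot is dropped.  Otherwise every active slot holds exactly one packet,
 * and the next slot in the round is emptied and unlinked from both the
 * round-robin ring and the hash table.
 */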
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	/* If the selected queue has length q->limit, this means that
	 * all the other queues are empty and that we do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (q->qs[x].qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += skb->len;
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int
sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	sch->qstats.backlog += skb->len;
	__skb_queue_head(&q->qs[x], skb);
	/* If the selected queue has length q->limit+1, this means that
	 * all the other queues are empty and we do simple tail drop.
	 * This packet is still requeued at the head of the queue; the
	 * tail packet is dropped.
	 */
	if (q->qs[x].qlen > q->limit) {
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.drops++;
		sch->qstats.backlog -= skb->len;
		kfree_skb(skb);
		return NET_XMIT_CN;
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, old_a;

	/* No active slots */
	if (q->tail == SFQ_DEPTH)
		return NULL;

	a = old_a = q->next[q->tail];

	/* Grab packet */
	skb = __skb_dequeue(&q->qs[a]);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= skb->len;

	/* Is the slot empty? */
	if (q->qs[a].qlen == 0) {
		q->ht[q->hash[a]] = SFQ_DEPTH;
		a = q->next[a];
		if (a == old_a) {
			q->tail = SFQ_DEPTH;
			return skb;
		}
		q->next[q->tail] = a;
		q->allot[a] += q->quantum;
	} else if ((q->allot[a] -= skb->len) <= 0) {
		q->tail = a;
		a = q->next[a];
		q->allot[a] += q->quantum;
	}
	return skb;
}
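/* Editorial note (not in the original source): this is where the deficit
 * round robin accounting referenced in the header comment happens.  A
 * slot's allotment is decreased by the length of each packet it sends,
 * and a fresh q->quantum of credit is granted to a slot each time the
 * round-robin pointer advances onto it, so every active flow may send
 * roughly quantum bytes per round.
 */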
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_DEPTH;

	for (i = 0; i < SFQ_DEPTH; i++) {
		skb_queue_head_init(&q->qs[i]);
		q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
		q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
	}

	q->limit = SFQ_DEPTH - 1;
	q->max_depth = 0;
	q->tail = SFQ_DEPTH;
	if (opt == NULL) {
		q->quantum = psched_mtu(sch->dev);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_DEPTH; i++)
		sfq_link(q, i);
	return 0;
}
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	del_timer(&q->perturb_timer);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	NULL,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.requeue	=	sfq_requeue,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");