/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/pkt_sched.h>
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	The simulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/
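
/* Typical usage from user space is via the iproute2 "tc" front end; the
 * exact option syntax depends on the installed tc version, but commands
 * along these lines exercise the features described above:
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1%
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50%
 *	tc qdisc change dev eth0 root netem corrupt 0.1%
 */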
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct timer_list timer;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
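
/* The update in get_crandom() is a first-order filter in 32.32 fixed
 * point: roughly answer = value*(1 - rho/2^32) + last*(rho/2^32), so the
 * larger the configured correlation rho, the closer each new sample
 * stays to the previous one.
 */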
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
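
/* Without a table the value above is drawn from [mu - sigma, mu + sigma);
 * e.g. mu = 100ms, sigma = 10ms yields delays in [90ms, 110ms).  With a
 * table, t is a scaled sample of the inverse CDF and the split computation
 * approximates mu + sigma * t / NETEM_DIST_SCALE while avoiding overflow
 * and rounding the remainder to the nearest step.
 */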
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}
	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* Flip one random bit in a random byte of the linear data */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}
	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* if more time remaining? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			psched_tdiff_t delay = PSCHED_TDIFF(cb->time_to_send, now);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				qdisc_tree_decrease_qlen(q->qdisc, 1);
				sch->qstats.drops++;
				printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
				       q->qdisc->ops->id);
			}

			mod_timer(&q->timer, jiffies + PSCHED_US2JIFFIE(delay));
			sch->flags |= TCQ_F_THROTTLED;
		}
	}

	return NULL;
}
static void netem_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;

	pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer_sync(&q->timer);
}
/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}
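
/* The tables fed in here are normally generated off-line and handed to the
 * kernel by tc; iproute2 ships precomputed inverse-CDF tables such as
 * normal.dist and pareto.dist (conventionally under /usr/lib/tc) and a
 * maketable tool for building tables from measured delay data.
 */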
static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;
	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on timestamps in skb's
 */
struct fifo_sched_data {
	u32 limit;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}
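
/* Walking the queue from the tail is a deliberate shortcut: most packets
 * carry the latest time_to_send and therefore belong at or near the end,
 * so the reverse walk usually terminates after one or two comparisons and
 * only heavily reordered traffic pays the full O(n) insertion cost.
 */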
static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}
static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");