/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net, which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines on top of it.  It does not need to
	do bandwidth control either, since that can be handled by a
	token bucket or another rate control discipline.

	The emulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/

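/*
 * Illustrative usage (userspace, not part of this module): netem is
 * normally configured through the tc(8) utility, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.3% duplicate 1% corrupt 0.1%
 *
 * which requests a 100ms delay with 10ms jitter (25% correlated with
 * the previous delay), plus the given loss, duplication, and corruption
 * rates.  tc converts the percentages into the u32 probability fields
 * parsed by netem_change() below.
 */
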
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	/* Correlated random number generator state (see get_crandom) */
	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* Optional user-supplied delay distribution table */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

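/*
 * Note: the blend above is (up to rounding) fixed-point arithmetic for
 *
 *	answer = (1 - p) * value + p * last,	p = rho / 2^32
 *
 * Illustrative example: with rho = 0x80000000 (p ~= 0.5) each output is
 * roughly the average of a fresh random value and the previous output,
 * so successive samples are positively correlated.
 */
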
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

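/*
 * The return value above is, in effect,
 *
 *	mu + t * sigma / NETEM_DIST_SCALE
 *
 * computed in two parts (sigma split by NETEM_DIST_SCALE into quotient
 * and remainder) to avoid overflow, with rounding to nearest.
 * Illustrative example: a table entry t = -NETEM_DIST_SCALE yields
 * roughly mu - sigma, i.e. one "standard deviation" below the mean.
 */
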
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	/*
	 * If we need to duplicate the packet, re-insert the duplicate at the
	 * top of the qdisc tree, since the parent queuer expects that only
	 * one skb will be queued per enqueue call.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a private copy if needed, since we are going to modify the
	 * data.  If the packet is due to be hardware checksummed, do the
	 * checksum now in software, before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit somewhere in the linear data */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

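/*
 * Reordering example (illustrative): with "gap 5" and a reorder
 * probability of 100%, the first five packets take the delayed path
 * above; once q->counter reaches q->gap, the next packet is
 * timestamped "now" and requeued at the head of the inner queue, so
 * it overtakes the delayed packets, and the counter restarts.
 */
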
/* Requeue packets, but don't change the time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* has this packet's send time arrived yet? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		/* not yet due: put it back and wake up when it is */
		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass a size change message down to the embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending a change message to a non-FIFO qdisc */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	/* swap in the new table under the queue lock, free the old one */
	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

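/*
 * Background note: the distribution tables themselves are generated
 * offline; iproute2 ships precomputed tables (e.g. "normal" and
 * "pareto") that tc loads and hands down as the TCA_NETEM_DELAY_DIST
 * attribute consumed above.
 */
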
static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, need to assume 100% probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];

		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

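/*
 * Option layout, for reference: the message starts with a fixed
 * struct tc_netem_qopt, optionally followed by the nested rtattrs
 * (TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST, TCA_NETEM_REORDER,
 * TCA_NETEM_CORRUPT) parsed above.
 */
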
/*
 * Special-case version of FIFO queue for use by netem.
 * It queues packets in order based on the timestamps in the skb
 * control block.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		/* otherwise walk backwards to find the insertion point */
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(tnext, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

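/*
 * Design note: tfifo keeps the queue sorted by time_to_send rather than
 * by arrival order, so jittered delays that would reorder packets
 * internally still release them in timestamp order, while packets
 * stamped "now" by the reordering path in netem_enqueue() naturally
 * sort toward the front.
 */
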
static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);

		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	PSCHED_SET_PASTPERFECT(q->oldest);
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

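/*
 * For reference, these attributes are what "tc qdisc show" renders back
 * to the user; illustrative output:
 *
 *	qdisc netem 8001: limit 1000 delay 100.0ms  10.0ms 25%
 */
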
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");