/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net, which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, Dummynet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP, but it has grown to include most of the functionality
	of a full-blown network emulator like NIST Net. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table, to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.
*/

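/* Illustrative only (not part of this file): from userspace this qdisc
 * is typically configured with the tc(8) command, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 * which requests a 100ms base delay with +/-10ms jitter, 25% delay
 * correlation, and 0.1% random packet loss.
 */
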
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	/* correlated random number generator state, one per event type */
	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional user-loaded delay distribution table */
	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

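/* netem's private cb lives in the data[] area that follows the generic
 * struct qdisc_skb_cb within skb->cb; the BUILD_BUG_ON above turns any
 * overflow of skb->cb by the two layers into a compile-time error.
 */
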
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

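/* In effect (a sketch, with p = state->rho / 2^32, the correlation
 * in [0, 1)):
 *
 *	answer = (1 - p) * U + p * last
 *
 * where U is a fresh uniform 32-bit value; the expression above computes
 * this blend entirely in 64-bit fixed-point arithmetic.
 */
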
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma. Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

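/* Table entries are, in effect, samples of the target distribution's
 * inverse CDF in units of sigma, scaled by NETEM_DIST_SCALE. Splitting
 * sigma into quotient and remainder keeps the multiply by t from
 * overflowing, and adding or subtracting NETEM_DIST_SCALE/2 rounds the
 * low-order part to the nearest unit.
 */
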
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed, since we are modifying it.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit somewhere in the linear data area */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

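/* Note the single "count" above: duplication adds one and loss subtracts
 * one, so a packet that rolls both events still results in exactly one
 * queued copy.
 */
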
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* is it time to send this packet yet? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}

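/* Dequeue uses a peek-then-commit pattern: the head packet is inspected
 * without removing it, and if its departure time is still in the future
 * the qdisc throttles itself and arms the watchdog timer to fire at
 * time_to_send, when the scheduler will try again.
 */
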
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}

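/* The payload is typically one of the distribution tables shipped with
 * iproute2 (e.g. normal, pareto, paretonormal), generated offline and
 * passed down by tc as the TCA_NETEM_DELAY_DIST attribute.
 */
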
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

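/* netem's netlink options are laid out as a struct tc_netem_qopt followed
 * immediately by optional nested attributes, so the stock
 * nla_parse_nested() cannot be used directly; parse_attr() skips the
 * fixed-size struct and parses whatever TLVs follow it.
 */
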
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions,
	 * if gap is set, assume 100% reordering probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order of the time_to_send stamp in each skb's cb[].
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

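/* The walk starts at the tail because packets normally arrive with
 * non-decreasing send times, so an out-of-order entry (produced by the
 * jitter or reorder paths above) is usually near the tail: in-order
 * arrivals stay O(1), and a misordered one costs a short reverse scan.
 */
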
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		= "tfifo",
	.priv_size	= sizeof(struct fifo_sched_data),
	.enqueue	= tfifo_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.peek		= qdisc_peek_head,
	.drop		= qdisc_queue_drop,
	.init		= tfifo_init,
	.reset		= qdisc_reset_queue,
	.change		= tfifo_init,
	.dump		= tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		= "netem",
	.priv_size	= sizeof(struct netem_sched_data),
	.enqueue	= netem_enqueue,
	.dequeue	= netem_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= netem_drop,
	.init		= netem_init,
	.reset		= netem_reset,
	.destroy	= netem_destroy,
	.change		= netem_change,
	.dump		= netem_dump,
	.owner		= THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");