/* net/sched/sch_teql.c	"True" (or "trivial") link equalizer.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>

/*
   How to set it up
   ----------------

   After loading this module you will find a new device teqlN
   and a new qdisc with the same name. To join a slave to the equalizer
   you should just set this qdisc on a device, e.g.:

   # tc qdisc add dev eth0 root teql0
   # tc qdisc add dev eth1 root teql0

   That's all. Full PnP 8)
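
   Note that the teql0 device itself still has to be brought up and
   configured (address, routes) like any other network device before
   traffic will actually flow over the equalized link.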

   Applicability
   -------------

   1. Slave devices MUST be active devices, i.e., they must raise the tbusy
      signal and generate EOI events. If you want to equalize virtual devices
      like tunnels, use a normal eql device.
   2. This device puts no limitations on physical slave characteristics,
      e.g. it will equalize a 9600 baud line and 100Mb ethernet perfectly :-)
      Certainly, a large difference in link speeds will make the resulting
      equalized link unusable, because of huge packet reordering.
      I estimate an upper useful difference as ~10 times.
   3. If the slave requires address resolution, only protocols using
      the neighbour cache (IPv4/IPv6) will work over the equalized link.
      Other protocols are still allowed to use the slave device directly,
      which will not break load balancing, though native slave
      traffic will have the highest priority.  */
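
/* One teql master is a virtual net_device plus the Qdisc_ops registered
 * under the same name ("teqlN").  Each slave qdisc instance keeps its own
 * teql_sched_data; the slaves of a master are linked into a circular list
 * through the ->next pointer.
 */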
struct teql_master
{
	struct Qdisc_ops qops;
	struct net_device *dev;
	struct Qdisc *slaves;
	struct list_head master_list;
	struct net_device_stats stats;
};

struct teql_sched_data
{
	struct Qdisc *next;
	struct teql_master *m;
	struct neighbour *ncache;
	struct sk_buff_head q;
};

#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)

#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)

/* "teql*" qdisc routines */
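
/* Enqueue on the per-slave queue; anything beyond the slave device's
 * tx_queue_len is dropped.
 */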
static int
teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct net_device *dev = sch->dev;
	struct teql_sched_data *q = qdisc_priv(sch);

	__skb_queue_tail(&q->q, skb);
	if (q->q.qlen <= dev->tx_queue_len) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	__skb_unlink(skb, &q->q);
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}
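
/* Put a packet back at the head of the queue after a failed transmit. */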
static int
teql_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct teql_sched_data *q = qdisc_priv(sch);

	__skb_queue_head(&q->q, skb);
	sch->qstats.requeues++;
	return 0;
}
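
/* Dequeue for a slave device.  When the slave queue runs empty, make this
 * slave the master's current one and wake the master queue so it can feed
 * us more packets.
 */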
static struct sk_buff *
teql_dequeue(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct sk_buff *skb = __skb_dequeue(&dat->q);

	if (skb == NULL) {
		struct net_device *m = dat->m->dev->qdisc->dev;
		if (m) {
			dat->m->slaves = sch;
			netif_wake_queue(m);
		}
	}
	sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen;
	return skb;
}

static __inline__ void
teql_neigh_release(struct neighbour *n)
{
	if (n)
		neigh_release(n);
}

static void
teql_reset(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);

	skb_queue_purge(&dat->q);
	sch->q.qlen = 0;
	teql_neigh_release(xchg(&dat->ncache, NULL));
}
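
/* Unlink this slave qdisc from the master's circular slave list and
 * release its queued packets and cached neighbour entry.
 */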
static void
teql_destroy(struct Qdisc *sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct teql_master *master = dat->m;

	if ((prev = master->slaves) != NULL) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					if (q == master->slaves) {
						master->slaves = NULL;
						spin_lock_bh(&master->dev->queue_lock);
						qdisc_reset(master->dev->qdisc);
						spin_unlock_bh(&master->dev->queue_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}
		} while ((prev = q) != master->slaves);
	}
}
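
/* Attach a new slave: the slave must be compatible with the master
 * (header room, MTU, link flags).  If the master is down, its MTU and
 * flags are relaxed to match the new slave instead.
 */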
static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct net_device *dev = sch->dev;
	struct teql_master *m = (struct teql_master *)sch->ops;
	struct teql_sched_data *q = qdisc_priv(sch);

	if (dev->hard_header_len > m->dev->hard_header_len)
		return -EINVAL;

	if (m->dev == dev)
		return -ELOOP;

	q->m = m;

	skb_queue_head_init(&q->q);

	if (m->slaves) {
		if (m->dev->flags & IFF_UP) {
			if ((m->dev->flags&IFF_POINTOPOINT && !(dev->flags&IFF_POINTOPOINT))
			    || (m->dev->flags&IFF_BROADCAST && !(dev->flags&IFF_BROADCAST))
			    || (m->dev->flags&IFF_MULTICAST && !(dev->flags&IFF_MULTICAST))
			    || dev->mtu < m->dev->mtu)
				return -EINVAL;
		} else {
			if (!(dev->flags&IFF_POINTOPOINT))
				m->dev->flags &= ~IFF_POINTOPOINT;
			if (!(dev->flags&IFF_BROADCAST))
				m->dev->flags &= ~IFF_BROADCAST;
			if (!(dev->flags&IFF_MULTICAST))
				m->dev->flags &= ~IFF_MULTICAST;
			if (dev->mtu < m->dev->mtu)
				m->dev->mtu = dev->mtu;
		}
		q->next = NEXT_SLAVE(m->slaves);
		NEXT_SLAVE(m->slaves) = sch;
	} else {
		q->next = sch;
		m->slaves = sch;
		m->dev->mtu = dev->mtu;
		m->dev->flags = (m->dev->flags&~FMASK)|(dev->flags&FMASK);
	}

	return 0;
}

/* "teql*" netdevice routines */
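
/* Build the link-layer header for the chosen slave, using the cached
 * neighbour entry when it matches the packet's destination.
 */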
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	struct teql_sched_data *q = qdisc_priv(dev->qdisc);
	struct neighbour *mn = skb->dst->neighbour;
	struct neighbour *n = q->ncache;

	if (n && n->tbl == mn->tbl &&
	    memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
		atomic_inc(&n->refcnt);
	} else {
		n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;
		read_lock(&n->lock);
		err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len);
		read_unlock(&n->lock);
		if (err < 0) {
			neigh_release(n);
			return -EINVAL;
		}
		teql_neigh_release(xchg(&q->ncache, n));
		return 0;
	}
	neigh_release(n);
	return (skb_res == NULL) ? -EAGAIN : 1;
}

static __inline__ int
teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	if (dev->hard_header == NULL ||
	    skb->dst == NULL ||
	    skb->dst->neighbour == NULL)
		return 0;
	return __teql_resolve(skb, skb_res, dev);
}
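
/* Round-robin transmit: walk the slave list starting from the last slave
 * used and hand the packet to the first slave that can take it.
 */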
static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct teql_master *master = (void *)dev->priv;
	struct Qdisc *start, *q;
	int busy, nores;
	int len = skb->len;
	struct sk_buff *skb_res = NULL;

	start = master->slaves;

restart:
	nores = 0;
	busy = 0;
	if ((q = start) == NULL)
		goto drop;

	do {
		struct net_device *slave = q->dev;

		if (slave->qdisc_sleeping != q)
			continue;
		if (netif_queue_stopped(slave) || !netif_running(slave)) {
			busy = 1;
			continue;
		}

		switch (teql_resolve(skb, skb_res, slave)) {
		case 0:
			if (spin_trylock(&slave->xmit_lock)) {
				slave->xmit_lock_owner = smp_processor_id();
				if (!netif_queue_stopped(slave) &&
				    slave->hard_start_xmit(skb, slave) == 0) {
					slave->xmit_lock_owner = -1;
					spin_unlock(&slave->xmit_lock);
					master->slaves = NEXT_SLAVE(q);
					netif_wake_queue(dev);
					master->stats.tx_packets++;
					master->stats.tx_bytes += len;
					return 0;
				}
				slave->xmit_lock_owner = -1;
				spin_unlock(&slave->xmit_lock);
			}
			if (netif_queue_stopped(dev))
				busy = 1;
			break;
		case 1:
			master->slaves = NEXT_SLAVE(q);
			return 0;
		default:
			nores = 1;
			break;
		}
		__skb_pull(skb, skb->nh.raw - skb->data);
	} while ((q = NEXT_SLAVE(q)) != start);

	if (nores && skb_res == NULL) {
		skb_res = skb;
		goto restart;
	}

	if (busy) {
		netif_stop_queue(dev);
		return 1;
	}
	master->stats.tx_errors++;

drop:
	master->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}
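
/* Bring the master up: derive its MTU and link flags from the current
 * set of slaves.
 */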
static int teql_master_open(struct net_device *dev)
{
	struct Qdisc *q;
	struct teql_master *m = (void *)dev->priv;
	int mtu = 0xFFFE;
	unsigned flags = IFF_NOARP|IFF_MULTICAST;

	if (m->slaves == NULL)
		return -EUNATCH;

	flags = FMASK;

	q = m->slaves;
	do {
		struct net_device *slave = q->dev;

		if (slave == NULL)
			return -EUNATCH;

		if (slave->mtu < mtu)
			mtu = slave->mtu;
		if (slave->hard_header_len > LL_MAX_HEADER)
			return -EINVAL;

		/* If all the slaves are BROADCAST, master is BROADCAST
		   If all the slaves are PtP, master is PtP
		   Otherwise, master is NBMA.
		 */
		if (!(slave->flags&IFF_POINTOPOINT))
			flags &= ~IFF_POINTOPOINT;
		if (!(slave->flags&IFF_BROADCAST))
			flags &= ~IFF_BROADCAST;
		if (!(slave->flags&IFF_MULTICAST))
			flags &= ~IFF_MULTICAST;
	} while ((q = NEXT_SLAVE(q)) != m->slaves);

	m->dev->mtu = mtu;
	m->dev->flags = (m->dev->flags&~FMASK) | flags;
	netif_start_queue(m->dev);
	return 0;
}

static int teql_master_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static struct net_device_stats *teql_master_stats(struct net_device *dev)
{
	struct teql_master *m = (void *)dev->priv;
	return &m->stats;
}

static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
	struct teql_master *m = (void *)dev->priv;
	struct Qdisc *q = m->slaves;

	if (q) {
		do {
			if (new_mtu > q->dev->mtu)
				return -EINVAL;
		} while ((q = NEXT_SLAVE(q)) != m->slaves);
	}
	dev->mtu = new_mtu;
	return 0;
}
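
/* Initialize one master device and the Qdisc_ops it will register. */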
static __init void teql_master_setup(struct net_device *dev)
{
	struct teql_master *master = dev->priv;
	struct Qdisc_ops *ops = &master->qops;

	master->dev	= dev;
	ops->priv_size	= sizeof(struct teql_sched_data);

	ops->enqueue	= teql_enqueue;
	ops->dequeue	= teql_dequeue;
	ops->requeue	= teql_requeue;
	ops->init	= teql_qdisc_init;
	ops->reset	= teql_reset;
	ops->destroy	= teql_destroy;
	ops->owner	= THIS_MODULE;

	dev->open		= teql_master_open;
	dev->hard_start_xmit	= teql_master_xmit;
	dev->stop		= teql_master_close;
	dev->get_stats		= teql_master_stats;
	dev->change_mtu		= teql_master_mtu;
	dev->type		= ARPHRD_VOID;
	dev->tx_queue_len	= 100;
	dev->flags		= IFF_NOARP;
	dev->hard_header_len	= LL_MAX_HEADER;
	SET_MODULE_OWNER(dev);
}

static LIST_HEAD(master_dev_list);
static int max_equalizers = 1;
module_param(max_equalizers, int, 0);
MODULE_PARM_DESC(max_equalizers, "Max number of link equalizers");
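
/* Create max_equalizers master devices and register a qdisc named after
 * each one (teql0, teql1, ...).
 */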
static int __init teql_init(void)
{
	int i;
	int err = -ENODEV;

	for (i = 0; i < max_equalizers; i++) {
		struct net_device *dev;
		struct teql_master *master;

		dev = alloc_netdev(sizeof(struct teql_master),
				   "teql%d", teql_master_setup);
		if (!dev) {
			err = -ENOMEM;
			break;
		}
		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			break;
		}

		master = dev->priv;
		strlcpy(master->qops.id, dev->name, IFNAMSIZ);
		err = register_qdisc(&master->qops);
		if (err) {
			unregister_netdev(dev);
			free_netdev(dev);
			break;
		}
		list_add_tail(&master->master_list, &master_dev_list);
	}
	return i ? 0 : err;
}

static void __exit teql_exit(void)
{
	struct teql_master *master, *nxt;

	list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {
		list_del(&master->master_list);
		unregister_qdisc(&master->qops);
		unregister_netdev(master->dev);
		free_netdev(master->dev);
	}
}

module_init(teql_init);
module_exit(teql_exit);

MODULE_LICENSE("GPL");