/* net/sched/sch_teql.c	"True" (or "trivial") link equalizer.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/moduleparam.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/*
   How to set it up
   ----------------

   After loading this module you will find a new device, teqlN,
   and a new qdisc with the same name. To join a slave to the equalizer,
   simply attach this qdisc to the device as its root qdisc, e.g.:

   # tc qdisc add dev eth0 root teql0
   # tc qdisc add dev eth1 root teql0

   That's all. Full PnP 8)
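
   The master interface still has to be addressed and brought up by hand.
   A typical follow-up, shown here with iproute2 (the address is only an
   illustrative example), is:

   # ip addr add 10.0.0.1/24 dev teql0
   # ip link set dev teql0 up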

   Applicability
   -------------

   1. Slave devices MUST be active devices, i.e., they must raise the tbusy
      signal and generate EOI events. If you want to equalize virtual devices
      like tunnels, use a normal eql device instead.
   2. This device puts no limitations on physical slave characteristics,
      e.g. it will happily equalize a 9600 baud line and 100Mb ethernet :-)
      Certainly, a large difference in link speeds will make the resulting
      equalized link unusable, because of massive packet reordering.
      I estimate the upper useful speed ratio as ~10 times.
   3. If a slave requires address resolution, only protocols using the
      neighbour cache (IPv4/IPv6) will work over the equalized link.
      Other protocols are still allowed to use the slave device directly,
      which will not break load balancing, though native slave
      traffic will have the highest priority.  */
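
/* Each master netdevice carries its own Qdisc_ops instance, so the qdisc
   and the device always share a name (teqlN), and the ops pointer of any
   slave qdisc leads back to its master. */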
struct teql_master
{
	struct Qdisc_ops qops;
	struct net_device *dev;
	struct Qdisc *slaves;
	struct list_head master_list;
	struct net_device_stats stats;
};

struct teql_sched_data
{
	struct Qdisc *next;
	struct teql_master *m;
	struct neighbour *ncache;
	struct sk_buff_head q;
};
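
/* The slave qdiscs of one master form a circular, singly linked ring
   threaded through teql_sched_data->next; master->slaves points at the
   current round-robin position in that ring. */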
#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)

#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST)
89 /* "teql*" qdisc routines */

static int
teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct net_device *dev = sch->dev;
	struct teql_sched_data *q = qdisc_priv(sch);

	__skb_queue_tail(&q->q, skb);
	if (q->q.qlen <= dev->tx_queue_len) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	__skb_unlink(skb, &q->q);
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}

static int
teql_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct teql_sched_data *q = qdisc_priv(sch);

	__skb_queue_head(&q->q, skb);
	sch->qstats.requeues++;
	return 0;
}

static struct sk_buff *
teql_dequeue(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct sk_buff *skb = __skb_dequeue(&dat->q);

	if (skb == NULL) {
		struct net_device *m = dat->m->dev->qdisc->dev;
		if (m) {
			/* Ran dry: park the round robin here and let the
			   master refill us. */
			dat->m->slaves = sch;
			netif_wake_queue(m);
		}
	}
	sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen;
	return skb;
}

static __inline__ void
teql_neigh_release(struct neighbour *n)
{
	if (n)
		neigh_release(n);
}

static void
teql_reset(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);

	skb_queue_purge(&dat->q);
	sch->q.qlen = 0;
	teql_neigh_release(xchg(&dat->ncache, NULL));
}

static void
teql_destroy(struct Qdisc *sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct teql_master *master = dat->m;

	if ((prev = master->slaves) != NULL) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				/* Unlink this qdisc from the slave ring. */
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					if (q == master->slaves) {
						/* Last slave gone. */
						master->slaves = NULL;
						spin_lock_bh(&master->dev->queue_lock);
						qdisc_reset(master->dev->qdisc);
						spin_unlock_bh(&master->dev->queue_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}
		} while ((prev = q) != master->slaves);
	}
}

static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct net_device *dev = sch->dev;
	struct teql_master *m = (struct teql_master *)sch->ops;
	struct teql_sched_data *q = qdisc_priv(sch);

	if (dev->hard_header_len > m->dev->hard_header_len)
		return -EINVAL;

	if (m->dev == dev)
		return -ELOOP;

	q->m = m;

	skb_queue_head_init(&q->q);

	if (m->slaves) {
		if (m->dev->flags & IFF_UP) {
			/* While the master is up, a new slave must not
			   narrow its capabilities. */
			if ((m->dev->flags&IFF_POINTOPOINT && !(dev->flags&IFF_POINTOPOINT))
			    || (m->dev->flags&IFF_BROADCAST && !(dev->flags&IFF_BROADCAST))
			    || (m->dev->flags&IFF_MULTICAST && !(dev->flags&IFF_MULTICAST))
			    || dev->mtu < m->dev->mtu)
				return -EINVAL;
		} else {
			/* Master is down: narrow its flags and MTU to what
			   the new slave supports. */
			if (!(dev->flags&IFF_POINTOPOINT))
				m->dev->flags &= ~IFF_POINTOPOINT;
			if (!(dev->flags&IFF_BROADCAST))
				m->dev->flags &= ~IFF_BROADCAST;
			if (!(dev->flags&IFF_MULTICAST))
				m->dev->flags &= ~IFF_MULTICAST;
			if (dev->mtu < m->dev->mtu)
				m->dev->mtu = dev->mtu;
		}
		q->next = NEXT_SLAVE(m->slaves);
		NEXT_SLAVE(m->slaves) = sch;
	} else {
		/* First slave defines the master's characteristics. */
		q->next = sch;
		m->slaves = sch;
		m->dev->mtu = dev->mtu;
		m->dev->flags = (m->dev->flags&~FMASK)|(dev->flags&FMASK);
	}
	return 0;
}
229 /* "teql*" netdevice routines */

static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	struct teql_sched_data *q = qdisc_priv(dev->qdisc);
	struct neighbour *mn = skb->dst->neighbour;
	struct neighbour *n = q->ncache;

	if (mn->tbl == NULL)
		return -EINVAL;
	if (n && n->tbl == mn->tbl &&
	    memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
		/* Cached neighbour matches; reuse it. */
		atomic_inc(&n->refcnt);
	} else {
		n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}
	if (neigh_event_send(n, skb_res) == 0) {
		int err;

		read_lock(&n->lock);
		err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len);
		read_unlock(&n->lock);

		if (err < 0) {
			neigh_release(n);
			return -EINVAL;
		}
		teql_neigh_release(xchg(&q->ncache, n));
		return 0;
	}
	neigh_release(n);
	return (skb_res == NULL) ? -EAGAIN : 1;
}

static __inline__ int
teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
{
	if (dev->hard_header == NULL ||
	    skb->dst == NULL ||
	    skb->dst->neighbour == NULL)
		return 0;
	return __teql_resolve(skb, skb_res, dev);
}

static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc *start, *q;
	int busy;
	int nores;
	int len = skb->len;
	struct sk_buff *skb_res = NULL;

	start = master->slaves;

restart:
	nores = 0;
	busy = 0;

	if ((q = start) == NULL)
		goto drop;

	/* Round-robin scan of the slave ring for a slave that can take
	   the packet right now. */
	do {
		struct net_device *slave = q->dev;

		if (slave->qdisc_sleeping != q)
			continue;
		if (netif_queue_stopped(slave) || !netif_running(slave)) {
			busy = 1;
			continue;
		}

		switch (teql_resolve(skb, skb_res, slave)) {
		case 0:
			if (netif_tx_trylock(slave)) {
				if (!netif_queue_stopped(slave) &&
				    slave->hard_start_xmit(skb, slave) == 0) {
					netif_tx_unlock(slave);
					master->slaves = NEXT_SLAVE(q);
					netif_wake_queue(dev);
					master->stats.tx_packets++;
					master->stats.tx_bytes += len;
					return 0;
				}
				netif_tx_unlock(slave);
			}
			if (netif_queue_stopped(dev))
				busy = 1;
			break;
		case 1:
			/* Neighbour layer took the skb for resolution. */
			master->slaves = NEXT_SLAVE(q);
			return 0;
		default:
			nores = 1;
			break;
		}
		__skb_pull(skb, skb_network_offset(skb));
	} while ((q = NEXT_SLAVE(q)) != start);

	if (nores && skb_res == NULL) {
		/* Second pass: allow the neighbour layer to queue the skb. */
		skb_res = skb;
		goto restart;
	}

	if (busy) {
		netif_stop_queue(dev);
		return 1;
	}
	master->stats.tx_errors++;

drop:
	master->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int teql_master_open(struct net_device *dev)
{
	struct Qdisc *q;
	struct teql_master *m = netdev_priv(dev);
	int mtu = 0xFFFE;
	unsigned flags = FMASK;

	if (m->slaves == NULL)
		return -EUNATCH;

	q = m->slaves;
	do {
		struct net_device *slave = q->dev;

		if (slave == NULL)
			return -EUNATCH;

		if (slave->mtu < mtu)
			mtu = slave->mtu;
		if (slave->hard_header_len > LL_MAX_HEADER)
			return -EINVAL;

		/* If all the slaves are BROADCAST, the master is BROADCAST.
		   If all the slaves are PtP, the master is PtP.
		   Otherwise, the master is NBMA. */
		if (!(slave->flags&IFF_POINTOPOINT))
			flags &= ~IFF_POINTOPOINT;
		if (!(slave->flags&IFF_BROADCAST))
			flags &= ~IFF_BROADCAST;
		if (!(slave->flags&IFF_MULTICAST))
			flags &= ~IFF_MULTICAST;
	} while ((q = NEXT_SLAVE(q)) != m->slaves);

	m->dev->mtu = mtu;
	m->dev->flags = (m->dev->flags&~FMASK) | flags;
	netif_start_queue(m->dev);
	return 0;
}

static int teql_master_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static struct net_device_stats *teql_master_stats(struct net_device *dev)
{
	struct teql_master *m = netdev_priv(dev);
	return &m->stats;
}

static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
	struct teql_master *m = netdev_priv(dev);
	struct Qdisc *q;

	if (new_mtu < 68)	/* minimum IPv4 MTU */
		return -EINVAL;

	/* The master's MTU may not exceed that of any slave. */
	q = m->slaves;
	if (q) {
		do {
			if (new_mtu > q->dev->mtu)
				return -EINVAL;
		} while ((q = NEXT_SLAVE(q)) != m->slaves);
	}

	dev->mtu = new_mtu;
	return 0;
}

static __init void teql_master_setup(struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc_ops *ops = &master->qops;

	master->dev	= dev;
	ops->priv_size	= sizeof(struct teql_sched_data);

	ops->enqueue	= teql_enqueue;
	ops->dequeue	= teql_dequeue;
	ops->requeue	= teql_requeue;
	ops->init	= teql_qdisc_init;
	ops->reset	= teql_reset;
	ops->destroy	= teql_destroy;
	ops->owner	= THIS_MODULE;

	dev->open		= teql_master_open;
	dev->hard_start_xmit	= teql_master_xmit;
	dev->stop		= teql_master_close;
	dev->get_stats		= teql_master_stats;
	dev->change_mtu		= teql_master_mtu;
	dev->type		= ARPHRD_VOID;
	dev->mtu		= 1500;
	dev->tx_queue_len	= 100;
	dev->flags		= IFF_NOARP;
	dev->hard_header_len	= LL_MAX_HEADER;
	SET_MODULE_OWNER(dev);
}

static LIST_HEAD(master_dev_list);
static int max_equalizers = 1;
module_param(max_equalizers, int, 0);
MODULE_PARM_DESC(max_equalizers, "Max number of link equalizers");
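
/* For example (illustrative), loading the module with
       # modprobe sch_teql max_equalizers=4
   creates the masters teql0 through teql3, each with a matching qdisc. */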

static int __init teql_init(void)
{
	int i;
	int err = -ENODEV;

	for (i = 0; i < max_equalizers; i++) {
		struct net_device *dev;
		struct teql_master *master;

		dev = alloc_netdev(sizeof(struct teql_master),
				   "teql%d", teql_master_setup);
		if (!dev) {
			err = -ENOMEM;
			break;
		}

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			break;
		}

		master = netdev_priv(dev);

		/* The qdisc is named after its master device (teqlN). */
		strlcpy(master->qops.id, dev->name, IFNAMSIZ);
		err = register_qdisc(&master->qops);
		if (err) {
			unregister_netdev(dev);
			free_netdev(dev);
			break;
		}

		list_add_tail(&master->master_list, &master_dev_list);
	}
	return i ? 0 : err;
}

static void __exit teql_exit(void)
{
	struct teql_master *master, *nxt;

	list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {
		list_del(&master->master_list);
		unregister_qdisc(&master->qops);
		unregister_netdev(master->dev);
		free_netdev(master->dev);
	}
}

module_init(teql_init);
module_exit(teql_exit);

MODULE_LICENSE("GPL");