[linux-2.6] drivers/net/shaper.c
/*
 *                      Simple traffic shaper for Linux NET3.
 *
 *      (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *                              http://www.redhat.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *      warranty for any of this software. This material is provided
 *      "AS-IS" and at no charge.
 *
 *
 *      Algorithm:
 *
 *      Queue Frame:
 *              Compute time length of frame at regulated speed
 *              Add frame to queue at appropriate point
 *              Adjust time length computation for follow-up frames
 *              Any frame that falls outside of its boundaries is freed
 *
 *      We work to the following constants
 *
 *              SHAPER_QLEN     Maximum queued frames
 *              SHAPER_LATENCY  Bounding latency on a frame. Leaving this latency
 *                              window drops the frame. This stops us queueing
 *                              frames for a long time and confusing a remote
 *                              host.
 *              SHAPER_MAXSLIP  Maximum time a priority frame may jump forward.
 *                              That bounds the penalty we will inflict on low
 *                              priority traffic.
 *              SHAPER_BURST    Time range we call "now" in order to reduce
 *                              system load. The larger we make this, the
 *                              burstier the behaviour: local performance
 *                              improves through packet clustering on routers,
 *                              but the remote end gets worse at judging RTTs.
 *
 *      This is designed to handle lower speed links ( < 200K/second or so). We
 *      run off a 100-150Hz base clock typically. This gives us a resolution at
 *      200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 *      resolution may start to cause much more burstiness in the traffic. We
 *      could avoid a lot of that by calling kick_shaper() at the end of the
 *      tied device transmissions. If you run above about 100K/second you
 *      may need to tune the supposed speed rate for the right values.
 *
 *      BUGS:
 *              Downing the interface under the shaper before the shaper itself
 *              will render your machine defunct. For now, therefore, don't
 *              shape over PPP or SLIP!
 *              This will be fixed in BETA4
 *
 * Update History :
 *
 *              Fixed bh_atomic() SMP races and rewrote the locking code to
 *              be SMP safe and irq-mask friendly.
 *              NOTE: we can't use start_bh_atomic() in kick_shaper()
 *              because it's going to be recalled from an irq handler,
 *              and synchronize_bh() is a no-no if called from irq context.
 *                                              1999  Andrea Arcangeli
 *
 *              Device statistics (tx_packets, tx_bytes,
 *              tx_drops: queue_over_time and collisions: max_queue_exceeded)
 *                               1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 *              Use skb->cb for private data.
 *                               2000/03 Andi Kleen
 */
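
/*
 * Worked example of the resolution figure above (illustrative): at
 * 200Kbit/second with a HZ=100 base clock, shaper_setspeed() below gives
 * bytespertick = (200000 / 100) / 8 = 250, so one timer tick represents
 * roughly 2Kbit (~256 bytes) of link time, which is where the quoted
 * resolution comes from.
 */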

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_shaper.h>
#include <linux/jiffies.h>

#include <net/dst.h>
#include <net/arp.h>

struct shaper_cb {
        unsigned long   shapeclock;             /* Time it should go out */
        unsigned long   shapestamp;             /* Stamp for shaper    */
        __u32           shapelatency;           /* Latency on frame */
        __u32           shapelen;               /* Frame length in clocks */
        __u16           shapepend;              /* Pending */
};
#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))

static int sh_debug;            /* Debug flag */

#define SHAPER_BANNER   "CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"

static void shaper_kick(struct shaper *sh);

/*
 *      Compute clocks on a buffer
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
        int t=skb->len/shaper->bytespertick;
        return t;
}

/*
 *      Set the speed of a shaper. We compute this in bytes per tick since
 *      that's how the machine wants to run. Quoted input is in bits per
 *      second as is traditional (note: not baud). We assume 8 bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
        shaper->bitspersec=bitspersec;
        shaper->bytespertick=(bitspersec/HZ)/8;
        if(!shaper->bytespertick)
                shaper->bytespertick++;
}
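
/*
 * Worked example (illustrative): a shaper set to 64000 bits/second with
 * HZ=100 gets bytespertick = (64000 / 100) / 8 = 80, so a 1500 byte
 * frame costs shaper_clocks() = 1500 / 80 = 18 ticks, i.e. about 180ms
 * of link time at that rate (the integer divisions round down).
 */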

/*
 *      Throw a frame at a shaper.
 */


static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct shaper *shaper = dev->priv;
        struct sk_buff *ptr;

        spin_lock(&shaper->lock);
        ptr=shaper->sendq.prev;

        /*
         *      Set up our packet details
         */

        SHAPERCB(skb)->shapelatency=0;
        SHAPERCB(skb)->shapeclock=shaper->recovery;
        if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
                SHAPERCB(skb)->shapeclock=jiffies;
        skb->priority=0;        /* short term bug fix */
        SHAPERCB(skb)->shapestamp=jiffies;

        /*
         *      Time slots for this packet.
         */

        SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);

        {
                struct sk_buff *tmp;
                /*
                 *      Up our shape clock by the time pending on the queue
                 *      (Should keep this in the shaper as a variable..)
                 */
                for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
                        tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
                        SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
                /*
                 *      Queue over time. Spill packet.
                 */
                if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) {
                        dev_kfree_skb(skb);
                        shaper->stats.tx_dropped++;
                } else
                        skb_queue_tail(&shaper->sendq, skb);
        }

        if(sh_debug)
                printk("Frame queued.\n");
        if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
        {
                ptr=skb_dequeue(&shaper->sendq);
                dev_kfree_skb(ptr);
                shaper->stats.collisions++;
        }
        shaper_kick(shaper);
        spin_unlock(&shaper->lock);
        return 0;
}
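
/*
 * Illustrative walk-through of the enqueue path above: at 64Kbit/second
 * (80 bytes per tick) three 1500 byte frames already on sendq account
 * for 3 * 18 = 54 ticks, so a fourth frame's shapeclock starts at the
 * later of shaper->recovery and jiffies and is then pushed 54 ticks
 * further out; if that lands after jiffies + SHAPER_LATENCY the frame
 * is spilled into tx_dropped instead of being queued.
 */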

/*
 *      Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
        struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
        if(sh_debug)
                printk("Kick frame on %p\n",newskb);
        if(newskb)
        {
                newskb->dev=shaper->dev;
                newskb->priority=2;
                if(sh_debug)
                        printk("Kick new frame to %s, %d\n",
                                shaper->dev->name,newskb->priority);
                dev_queue_xmit(newskb);

                shaper->stats.tx_bytes += skb->len;
                shaper->stats.tx_packets++;

                if(sh_debug)
                        printk("Kicked new frame out.\n");
                dev_kfree_skb(skb);
        }
}

/*
 *      Timer handler for shaping clock
 */

static void shaper_timer(unsigned long data)
{
        struct shaper *shaper = (struct shaper *)data;

        spin_lock(&shaper->lock);
        shaper_kick(shaper);
        spin_unlock(&shaper->lock);
}

/*
 *      Kick a shaper queue and try and do something sensible with the
 *      queue.
 */

static void shaper_kick(struct shaper *shaper)
{
        struct sk_buff *skb;

        /*
         *      Walk the list (may be empty)
         */

        while((skb=skb_peek(&shaper->sendq))!=NULL)
        {
                /*
                 *      Each packet due to go out by now (within an error
                 *      of SHAPER_BURST) gets kicked onto the link
                 */

                if(sh_debug)
                        printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
                if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
                {
                        /*
                         *      Pull the frame and get interrupts back on.
                         */

                        skb_unlink(skb, &shaper->sendq);
                        if (shaper->recovery <
                            SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
                                shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
                        /*
                         *      Pass on to the physical target device via
                         *      our low level packet thrower.
                         */

                        SHAPERCB(skb)->shapepend=0;
                        shaper_queue_xmit(shaper, skb); /* Fire */
                }
                else
                        break;
        }

        /*
         *      Next kick.
         */

        if(skb!=NULL)
                mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}


/*
 *      Bring the interface up. We just disallow this until a
 *      bind.
 */

static int shaper_open(struct net_device *dev)
{
        struct shaper *shaper=dev->priv;

        /*
         *      Can't open until attached.
         *      Also can't open until speed is set, or we'll get
         *      a division by zero.
         */

        if(shaper->dev==NULL)
                return -ENODEV;
        if(shaper->bitspersec==0)
                return -EINVAL;
        return 0;
}

/*
 *      Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
        struct shaper *shaper=dev->priv;
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
                dev_kfree_skb(skb);

        spin_lock_bh(&shaper->lock);
        shaper_kick(shaper);
        spin_unlock_bh(&shaper->lock);

        del_timer_sync(&shaper->timer);
        return 0;
}

/*
 *      Revectored calls. We alter the parameters and call the functions
 *      for our attached device. This enables us to allocate bandwidth
 *      after ARP and other resolutions, not before.
 */

static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
        struct shaper *sh=dev->priv;
        return &sh->stats;
}

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
        unsigned short type, void *daddr, void *saddr, unsigned len)
{
        struct shaper *sh=dev->priv;
        int v;
        if(sh_debug)
                printk("Shaper header\n");
        skb->dev=sh->dev;
        v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
        skb->dev=dev;
        return v;
}

static int shaper_rebuild_header(struct sk_buff *skb)
{
        struct shaper *sh=skb->dev->priv;
        struct net_device *dev=skb->dev;
        int v;
        if(sh_debug)
                printk("Shaper rebuild header\n");
        skb->dev=sh->dev;
        v=sh->rebuild_header(skb);
        skb->dev=dev;
        return v;
}

#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
        struct shaper *sh=neigh->dev->priv;
        struct net_device *tmp;
        int ret;
        if(sh_debug)
                printk("Shaper header cache bind\n");
        tmp=neigh->dev;
        neigh->dev=sh->dev;
        ret=sh->hard_header_cache(neigh,hh);
        neigh->dev=tmp;
        return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
        unsigned char *haddr)
{
        struct shaper *sh=dev->priv;
        if(sh_debug)
                printk("Shaper cache update\n");
        sh->header_cache_update(hh, sh->dev, haddr);
}
#endif

#ifdef CONFIG_INET

static int shaper_neigh_setup(struct neighbour *n)
{
#ifdef CONFIG_INET
        if (n->nud_state == NUD_NONE) {
                n->ops = &arp_broken_ops;
                n->output = n->ops->output;
        }
#endif
        return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
#ifdef CONFIG_INET
        if (p->tbl->family == AF_INET) {
                p->neigh_setup = shaper_neigh_setup;
                p->ucast_probes = 0;
                p->mcast_probes = 0;
        }
#endif
        return 0;
}

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
        return 0;
}

#endif

static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
        sh->dev = dev;
        sh->hard_start_xmit=dev->hard_start_xmit;
        sh->get_stats=dev->get_stats;
        if(dev->hard_header)
        {
                sh->hard_header=dev->hard_header;
                shdev->hard_header = shaper_header;
        }
        else
                shdev->hard_header = NULL;

        if(dev->rebuild_header)
        {
                sh->rebuild_header      = dev->rebuild_header;
                shdev->rebuild_header   = shaper_rebuild_header;
        }
        else
                shdev->rebuild_header   = NULL;

#if 0
        if(dev->hard_header_cache)
        {
                sh->hard_header_cache   = dev->hard_header_cache;
                shdev->hard_header_cache= shaper_cache;
        }
        else
        {
                shdev->hard_header_cache= NULL;
        }

        if(dev->header_cache_update)
        {
                sh->header_cache_update = dev->header_cache_update;
                shdev->header_cache_update = shaper_cache_update;
        }
        else
                shdev->header_cache_update= NULL;
#else
        shdev->header_cache_update = NULL;
        shdev->hard_header_cache = NULL;
#endif
        shdev->neigh_setup = shaper_neigh_setup_dev;

        shdev->hard_header_len=dev->hard_header_len;
        shdev->type=dev->type;
        shdev->addr_len=dev->addr_len;
        shdev->mtu=dev->mtu;
        sh->bitspersec=0;
        return 0;
}

static int shaper_ioctl(struct net_device *dev,  struct ifreq *ifr, int cmd)
{
        struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru;
        struct shaper *sh=dev->priv;

        if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
        {
                if(!capable(CAP_NET_ADMIN))
                        return -EPERM;
        }

        switch(ss->ss_cmd)
        {
                case SHAPER_SET_DEV:
                {
                        struct net_device *them=__dev_get_by_name(ss->ss_name);
                        if(them==NULL)
                                return -ENODEV;
                        if(sh->dev)
                                return -EBUSY;
                        return shaper_attach(dev,dev->priv, them);
                }
                case SHAPER_GET_DEV:
                        if(sh->dev==NULL)
                                return -ENODEV;
                        strcpy(ss->ss_name, sh->dev->name);
                        return 0;
                case SHAPER_SET_SPEED:
                        shaper_setspeed(sh,ss->ss_speed);
                        return 0;
                case SHAPER_GET_SPEED:
                        ss->ss_speed=sh->bitspersec;
                        return 0;
                default:
                        return -EINVAL;
        }
}
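
/*
 * Minimal userspace sketch of driving shaper_ioctl() above, kept in an
 * #if 0 block as this file already does for dead code. Illustrative
 * only and based on assumptions: it presumes the request is issued as
 * SIOCDEVPRIVATE (which dev_ioctl() routes to do_ioctl(); note that
 * shaper_ioctl() ignores cmd and dispatches purely on ss_cmd), and the
 * device/slave names ("shaper0", "eth1" in the usage note) are examples.
 * The struct shaperconf overlay of ifr_ifru matches the cast above.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <string.h>
#include <linux/types.h>
#include <linux/if.h>
#include <linux/if_shaper.h>
#include <linux/sockios.h>

/* e.g. shaper_bind(sock, "shaper0", "eth1", 64000) */
static int shaper_bind(int sock, const char *shaper, const char *slave,
                       __u32 bitspersec)
{
        struct ifreq ifr;
        struct shaperconf *ss = (struct shaperconf *)&ifr.ifr_ifru;

        /* SHAPER_SET_DEV: attach the slave device to the shaper */
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, shaper, IFNAMSIZ - 1);
        ss->ss_cmd = SHAPER_SET_DEV;
        strncpy(ss->ss_name, slave, sizeof(ss->ss_name) - 1);
        if (ioctl(sock, SIOCDEVPRIVATE, &ifr) < 0)
                return -1;

        /* SHAPER_SET_SPEED: set the regulated rate in bits per second */
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, shaper, IFNAMSIZ - 1);
        ss->ss_cmd = SHAPER_SET_SPEED;
        ss->ss_speed = bitspersec;
        return ioctl(sock, SIOCDEVPRIVATE, &ifr);
}
#endif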

static void shaper_init_priv(struct net_device *dev)
{
        struct shaper *sh = dev->priv;

        skb_queue_head_init(&sh->sendq);
        init_timer(&sh->timer);
        sh->timer.function=shaper_timer;
        sh->timer.data=(unsigned long)sh;
        spin_lock_init(&sh->lock);
}

/*
 *      Add a shaper device to the system
 */

static void __init shaper_setup(struct net_device *dev)
{
        /*
         *      Set up the shaper.
         */

        SET_MODULE_OWNER(dev);

        shaper_init_priv(dev);

        dev->open               = shaper_open;
        dev->stop               = shaper_close;
        dev->hard_start_xmit    = shaper_start_xmit;
        dev->get_stats          = shaper_get_stats;
        dev->set_multicast_list = NULL;

        /*
         *      Initialise the packet queues
         */

        /*
         *      Handlers for when we attach to a device.
         */

        dev->hard_header        = shaper_header;
        dev->rebuild_header     = shaper_rebuild_header;
#if 0
        dev->hard_header_cache  = shaper_cache;
        dev->header_cache_update= shaper_cache_update;
#endif
        dev->neigh_setup        = shaper_neigh_setup_dev;
        dev->do_ioctl           = shaper_ioctl;
        dev->hard_header_len    = 0;
        dev->type               = ARPHRD_ETHER; /* initially */
        dev->set_mac_address    = NULL;
        dev->mtu                = 1500;
        dev->addr_len           = 0;
        dev->tx_queue_len       = 10;
        dev->flags              = 0;
}

static int shapers = 1;
#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

static int __init set_num_shapers(char *str)
{
        shapers = simple_strtol(str, NULL, 0);
        return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */
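
/*
 * Usage note (illustrative): the number of shaperN devices created at
 * init time comes from the "shapers" knob above, e.g.
 * "modprobe shaper shapers=4" when built as a module, or the boot
 * parameter "shapers=4" when built in; the default is a single shaper0.
 */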

static struct net_device **devs;

static unsigned int shapers_registered = 0;

static int __init shaper_init(void)
{
        int i;
        size_t alloc_size;
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (shapers < 1)
                return -ENODEV;

        alloc_size = sizeof(*devs) * shapers;
        devs = kmalloc(alloc_size, GFP_KERNEL);
        if (!devs)
                return -ENOMEM;
        memset(devs, 0, alloc_size);

        for (i = 0; i < shapers; i++) {

                snprintf(name, IFNAMSIZ, "shaper%d", i);
                dev = alloc_netdev(sizeof(struct shaper), name,
                                   shaper_setup);
                if (!dev)
                        break;

                if (register_netdev(dev)) {
                        free_netdev(dev);
                        break;
                }

                devs[i] = dev;
                shapers_registered++;
        }

        if (!shapers_registered) {
                kfree(devs);
                devs = NULL;
        }

        return (shapers_registered ? 0 : -ENODEV);
}

static void __exit shaper_exit (void)
{
        int i;

        for (i = 0; i < shapers_registered; i++) {
                if (devs[i]) {
                        unregister_netdev(devs[i]);
                        free_netdev(devs[i]);
                }
        }

        kfree(devs);
        devs = NULL;
}

module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");