[NET]: Convert init_timer into setup_timer
net/802/tr.c
/*
 * NET3:        Token ring device handling subroutines
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Fixes:       3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *              Added rif table to /proc/net/tr_rif and rif timeout to
 *              /proc/sys/net/token-ring/rif_timeout.
 *              22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *              tr_header and tr_type_trans to handle passing IPX SNAP and
 *              802.2 through the correct layers. Eliminated tr_reformat.
 *
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/arp.h>
#include <net/net_namespace.h>

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

#define TR_SR_DEBUG 0

/*
 *      Each RIF entry we learn is kept this way
 */

struct rif_cache {
        unsigned char addr[TR_ALEN];
        int iface;
        __be16 rcf;
        __be16 rseg[8];
        struct rif_cache *next;
        unsigned long last_used;
        unsigned char local_ring;
};

#define RIF_TABLE_SIZE 32

/*
 *      We hash the RIF cache 32 ways. We do after all have to look it
 *      up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);


/*
 *      Garbage disposal timer.
 */

static struct timer_list rif_timer;

int sysctl_tr_rif_timeout = 60*10*HZ;

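/*
 *      Fold the six byte hardware address down to an index into the
 *      32-entry RIF table.
 */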
static inline unsigned long rif_hash(const unsigned char *addr)
{
        unsigned long x;

        x = addr[0];
        x = (x << 2) ^ addr[1];
        x = (x << 2) ^ addr[2];
        x = (x << 2) ^ addr[3];
        x = (x << 2) ^ addr[4];
        x = (x << 2) ^ addr[5];

        x ^= x >> 8;

        return x & (RIF_TABLE_SIZE - 1);
}

/*
 *      Put the headers on a token ring packet. Token ring source routing
 *      makes this a little more exciting than on ethernet.
 */

static int tr_header(struct sk_buff *skb, struct net_device *dev,
                     unsigned short type,
                     const void *daddr, const void *saddr, unsigned len)
{
        struct trh_hdr *trh;
        int hdr_len;

        /*
         * Add the 802.2 SNAP header if IP, as the IPv4/IPv6 code calls
         * dev->hard_header directly.
         */
        if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP)
        {
                struct trllc *trllc;

                hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
                trh = (struct trh_hdr *)skb_push(skb, hdr_len);
                trllc = (struct trllc *)(trh+1);
                trllc->dsap = trllc->ssap = EXTENDED_SAP;
                trllc->llc = UI_CMD;
                trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
                trllc->ethertype = htons(type);
        }
        else
        {
                hdr_len = sizeof(struct trh_hdr);
                trh = (struct trh_hdr *)skb_push(skb, hdr_len);
        }

        trh->ac=AC;
        trh->fc=LLC_FRAME;

        if(saddr)
                memcpy(trh->saddr,saddr,dev->addr_len);
        else
                memcpy(trh->saddr,dev->dev_addr,dev->addr_len);

        /*
         *      Build the destination and then source route the frame
         */

        if(daddr)
        {
                memcpy(trh->daddr,daddr,dev->addr_len);
                tr_source_route(skb, trh, dev);
                return(hdr_len);
        }

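        /*
         *      No destination yet: signal a partially built header by
         *      returning its length negated.
         */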
        return -hdr_len;
}

/*
 *      A neighbour discovery of some species (eg arp) has completed. We
 *      can now send the packet.
 */

static int tr_rebuild_header(struct sk_buff *skb)
{
        struct trh_hdr *trh=(struct trh_hdr *)skb->data;
        struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr));
        struct net_device *dev = skb->dev;

        /*
         *      FIXME: We don't yet support IPv6 over token rings
         */

        if(trllc->ethertype != htons(ETH_P_IP)) {
                printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype));
                return 0;
        }

#ifdef CONFIG_INET
        if(arp_find(trh->daddr, skb)) {
                return 1;
        }
        else
#endif
        {
                tr_source_route(skb,trh,dev);
                return 0;
        }
}

/*
 *      Some of this is a bit hackish. We intercept RIF information
 *      used for source routing. We also grab IP directly and don't feed
 *      it via SNAP.
 */

__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct trh_hdr *trh;
        struct trllc *trllc;
        unsigned riflen=0;

        skb->dev = dev;
        skb_reset_mac_header(skb);
        trh = tr_hdr(skb);

        if(trh->saddr[0] & TR_RII)
                riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;

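        /*
         *      The RIF field is variable length, so the LLC header sits
         *      sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen bytes in;
         *      pull the MAC header off by the same amount.
         */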
        trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

        skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen);

        if(*trh->daddr & 0x80)
        {
                if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN))
                        skb->pkt_type=PACKET_BROADCAST;
                else
                        skb->pkt_type=PACKET_MULTICAST;
        }
        else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E))
        {
                skb->pkt_type=PACKET_MULTICAST;
        }
        else if(dev->flags & IFF_PROMISC)
        {
                if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
                        skb->pkt_type=PACKET_OTHERHOST;
        }

        if ((skb->pkt_type != PACKET_BROADCAST) &&
            (skb->pkt_type != PACKET_MULTICAST))
                tr_add_rif_info(trh,dev);

        /*
         * Strip the SNAP header from ARP packets since we don't
         * pass them through to the 802.2/SNAP layers.
         */

        if (trllc->dsap == EXTENDED_SAP &&
            (trllc->ethertype == htons(ETH_P_IP) ||
             trllc->ethertype == htons(ETH_P_IPV6) ||
             trllc->ethertype == htons(ETH_P_ARP)))
        {
                skb_pull(skb, sizeof(struct trllc));
                return trllc->ethertype;
        }

        return htons(ETH_P_TR_802_2);
}

/*
 *      We try to do source routing...
 */

void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,
                     struct net_device *dev)
{
        int slack;
        unsigned int hash;
        struct rif_cache *entry;
        unsigned char *olddata;
        unsigned long flags;
        static const unsigned char mcast_func_addr[]
                = {0xC0,0x00,0x00,0x04,0x00,0x00};

        spin_lock_irqsave(&rif_lock, flags);

        /*
         *      Broadcasts are single route as stated in RFC 1042
         */
        if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) ||
            (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN))  )
        {
                trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
                               | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
                trh->saddr[0]|=TR_RII;
        }
        else
        {
                hash = rif_hash(trh->daddr);
                /*
                 *      Walk the hash table and look for an entry
                 */
                for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next);

                /*
                 *      If we found an entry we can route the frame.
                 */
                if(entry)
                {
#if TR_SR_DEBUG
{
DECLARE_MAC_BUF(mac);
printk("source routing for %s\n",print_mac(mac, trh->daddr));
}
#endif
                        if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8)
                        {
                                trh->rcf=entry->rcf;
                                memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short));
                                trh->rcf^=htons(TR_RCF_DIR_BIT);
                                trh->rcf&=htons(0x1fff);        /* Issam Chehab <ichehab@madge1.demon.co.uk> */

                                trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
                                printk("entry found with rcf %04x\n", entry->rcf);
                        }
                        else
                        {
                                printk("entry found but without rcf length, local=%02x\n", entry->local_ring);
#endif
                        }
                        entry->last_used=jiffies;
                }
                else
                {
                        /*
                         *      Without the information we simply have to shout
                         *      on the wire. The replies should rapidly clean this
                         *      situation up.
                         */
                        trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
                                       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
                        trh->saddr[0]|=TR_RII;
#if TR_SR_DEBUG
                        printk("no entry in rif table found - broadcasting frame\n");
#endif
                }
        }

        /* Compress the RIF here so we don't have to do it in the driver(s) */
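        /* slack is the unused part of the 18-byte (TR_MAXRIFLEN) RIF area */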
        if (!(trh->saddr[0] & 0x80))
                slack = 18;
        else
                slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
        olddata = skb->data;
        spin_unlock_irqrestore(&rif_lock, flags);

        skb_pull(skb, slack);
        memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}

/*
 *      We have learned some new RIF information for our source
 *      routing.
 */

static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
        unsigned int hash, rii_p = 0;
        unsigned long flags;
        struct rif_cache *entry;
        unsigned char saddr0;

        spin_lock_irqsave(&rif_lock, flags);
        saddr0 = trh->saddr[0];

        /*
         *      Firstly see if the entry exists
         */

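        /*
         *      The RII bit in the source address marks the presence of
         *      routing information; only a RIF longer than the two-byte
         *      RCF actually carries segment numbers worth caching.
         */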
        if(trh->saddr[0] & TR_RII)
        {
                trh->saddr[0]&=0x7f;
                if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
                {
                        rii_p = 1;
                }
        }

        hash = rif_hash(trh->saddr);
        for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next);

        if(entry==NULL)
        {
#if TR_SR_DEBUG
                DECLARE_MAC_BUF(mac);
                printk("adding rif_entry: addr:%s rcf:%04X\n",
                       print_mac(mac, trh->saddr), ntohs(trh->rcf));
#endif
                /*
                 *      Allocate our new entry. A failure to allocate loses
                 *      us the information. This is harmless.
                 *
                 *      FIXME: We ought to keep some kind of cache size
                 *      limiting and adjust the timers to suit.
                 */
                entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);

                if(!entry)
                {
                        printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
                        spin_unlock_irqrestore(&rif_lock, flags);
                        return;
                }

                memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
                entry->iface = dev->ifindex;
                entry->next=rif_table[hash];
                entry->last_used=jiffies;
                rif_table[hash]=entry;

                if (rii_p)
                {
                        entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
                        memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
                        entry->local_ring = 0;
                }
                else
                {
                        entry->local_ring = 1;
                }
        }
        else    /* Y. Tahara added */
        {
                /*
                 *      Update existing entries
                 */
                if (!entry->local_ring)
                        if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
                            !(trh->rcf & htons(TR_RCF_BROADCAST_MASK)))
                        {
#if TR_SR_DEBUG
{
DECLARE_MAC_BUF(mac);
printk("updating rif_entry: addr:%s rcf:%04X\n",
       print_mac(mac, trh->saddr), ntohs(trh->rcf));
}
#endif
                                entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
                                memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
                        }
                entry->last_used=jiffies;
        }
        trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */
        spin_unlock_irqrestore(&rif_lock, flags);
}

/*
 *      Scan the cache with a timer and see what we need to throw out.
 */

static void rif_check_expire(unsigned long dummy)
{
        int i;
        unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

        spin_lock_irqsave(&rif_lock, flags);

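        /*
         *      Walk every hash chain, free entries idle for longer than
         *      sysctl_tr_rif_timeout and track the earliest remaining
         *      expiry so the timer can be rearmed for that moment.
         */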
        for(i = 0; i < RIF_TABLE_SIZE; i++) {
                struct rif_cache *entry, **pentry;

                pentry = rif_table+i;
                while((entry=*pentry) != NULL) {
                        unsigned long expires
                                = entry->last_used + sysctl_tr_rif_timeout;

                        if (time_before_eq(expires, jiffies)) {
                                *pentry = entry->next;
                                kfree(entry);
                        } else {
                                pentry = &entry->next;

                                if (time_before(expires, next_interval))
                                        next_interval = expires;
                        }
                }
        }

        spin_unlock_irqrestore(&rif_lock, flags);

        mod_timer(&rif_timer, next_interval);
}

/*
 *      Generate the /proc/net information for the token ring RIF
 *      routing.
 */

#ifdef CONFIG_PROC_FS

static struct rif_cache *rif_get_idx(loff_t pos)
{
        int i;
        struct rif_cache *entry;
        loff_t off = 0;

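        /* walk the buckets in order until the pos'th entry is reached */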
        for(i = 0; i < RIF_TABLE_SIZE; i++)
                for(entry = rif_table[i]; entry; entry = entry->next) {
                        if (off == pos)
                                return entry;
                        ++off;
                }

        return NULL;
}

static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
        spin_lock_irq(&rif_lock);

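        /* position 0 is the header line (SEQ_START_TOKEN); entries start at *pos - 1 */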
        return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int i;
        struct rif_cache *ent = v;

        ++*pos;

        if (v == SEQ_START_TOKEN) {
                i = -1;
                goto scan;
        }

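        /* stay in the current hash chain while it has more entries */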
        if (ent->next)
                return ent->next;

        i = rif_hash(ent->addr);
 scan:
        while (++i < RIF_TABLE_SIZE) {
                if ((ent = rif_table[i]) != NULL)
                        return ent;
        }
        return NULL;
}

static void rif_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock_irq(&rif_lock);
}

static int rif_seq_show(struct seq_file *seq, void *v)
{
        int j, rcf_len, segment, brdgnmb;
        struct rif_cache *entry = v;
        DECLARE_MAC_BUF(mac);

        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                     "if     TR address       TTL   rcf   routing segments\n");
        else {
                struct net_device *dev = dev_get_by_index(&init_net, entry->iface);
                long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
                                - (long) jiffies;

                seq_printf(seq, "%s %s %7li ",
                           dev?dev->name:"?",
                           print_mac(mac, entry->addr),
                           ttl/HZ);

                if (entry->local_ring)
                        seq_puts(seq, "local\n");
                else {
                        seq_printf(seq, "%04X", ntohs(entry->rcf));
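                        /*
                         * The RIF length counts the 2-byte RCF itself; the
                         * rest is a list of 2-byte segment numbers.
                         */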
                        rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2;
                        if (rcf_len)
                                rcf_len >>= 1;
                        for(j = 1; j < rcf_len; j++) {
                                if(j==1) {
                                        segment=ntohs(entry->rseg[j-1])>>4;
                                        seq_printf(seq,"  %03X",segment);
                                }

                                segment=ntohs(entry->rseg[j])>>4;
                                brdgnmb=ntohs(entry->rseg[j-1])&0x00f;
                                seq_printf(seq,"-%01X-%03X",brdgnmb,segment);
                        }
                        seq_putc(seq, '\n');
                }
                if (dev)
                        dev_put(dev);   /* drop the reference taken by dev_get_by_index() */
        }
        return 0;
}

static const struct seq_operations rif_seq_ops = {
        .start = rif_seq_start,
        .next  = rif_seq_next,
        .stop  = rif_seq_stop,
        .show  = rif_seq_show,
};

static int rif_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rif_seq_ops);
}

static const struct file_operations rif_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rif_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#endif

static const struct header_ops tr_header_ops = {
        .create  = tr_header,
        .rebuild = tr_rebuild_header,
};

static void tr_setup(struct net_device *dev)
{
        /*
         *      Configure and register
         */

        dev->header_ops = &tr_header_ops;

        dev->type               = ARPHRD_IEEE802_TR;
        dev->hard_header_len    = TR_HLEN;
        dev->mtu                = 2000;
        dev->addr_len           = TR_ALEN;
        dev->tx_queue_len       = 100;  /* Long queues on tr */

        memset(dev->broadcast,0xFF, TR_ALEN);

        /* New-style flags. */
        dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
}

/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *      for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
        return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}

/*
 *      Called during bootup.  We don't actually have to initialise
 *      too much for this.
 */

static int __init rif_init(void)
{
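        /*
         *      setup_timer() initialises the timer and sets its callback and
         *      data argument in one go, replacing init_timer() plus explicit
         *      ->function and ->data assignments.
         */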
        rif_timer.expires = jiffies + sysctl_tr_rif_timeout;
        setup_timer(&rif_timer, rif_check_expire, 0);
        add_timer(&rif_timer);

        proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
        return 0;
}

module_init(rif_init);

EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);