/*
 * NET3:	Token ring device handling subroutines
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:	3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes.
 *		Added rif table to /proc/net/tr_rif and rif timeout to
 *		/proc/sys/net/token-ring/rif_timeout.
 *		22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged
 *		tr_header and tr_type_trans to handle passing IPX SNAP and
 *		802.2 through the correct layers. Eliminated tr_reformat.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/arp.h>		/* arp_find() */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev);
static void rif_check_expire(unsigned long dummy);

/* Set to 1 for verbose source-routing debug printks. */
#define TR_SR_DEBUG 0
/*
 *	Each RIF entry we learn is kept this way
 */

struct rif_cache {
        unsigned char addr[TR_ALEN];    /* MAC address this route leads to */
        int iface;                      /* ifindex the route was learned on */
        __be16 rcf;                     /* routing control field */
        __be16 rseg[8];                 /* ring/bridge route descriptors */
        struct rif_cache *next;
        unsigned long last_used;
        unsigned char local_ring;
};
#define RIF_TABLE_SIZE 32

/*
 *	We hash the RIF cache 32 ways. We do after all have to look it
 *	up a lot.
 */

static struct rif_cache *rif_table[RIF_TABLE_SIZE];

static DEFINE_SPINLOCK(rif_lock);

/*
 *	Garbage disposal timer.
 */

static struct timer_list rif_timer;
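/*
 *	Default lifetime of a learned RIF entry: ten minutes worth of jiffies.
 *	Tunable at run time through /proc/sys/net/token-ring/rif_timeout.
 */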
int sysctl_tr_rif_timeout = 60*10*HZ;
static inline unsigned long rif_hash(const unsigned char *addr)
{
        unsigned long x;

        x = addr[0];
        x = (x << 2) ^ addr[1];
        x = (x << 2) ^ addr[2];
        x = (x << 2) ^ addr[3];
        x = (x << 2) ^ addr[4];
        x = (x << 2) ^ addr[5];

        x ^= x >> 8;

        return x & (RIF_TABLE_SIZE - 1);
}
/*
 *	Put the headers on a token ring packet. Token ring source routing
 *	makes this a little more exciting than on ethernet.
 */
static int tr_header(struct sk_buff *skb, struct net_device *dev,
                     unsigned short type,
                     void *daddr, void *saddr, unsigned len)
{
        struct trh_hdr *trh;
        int hdr_len;

        /*
         * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls
         * dev->hard_header directly.
         */
        if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP) {
                struct trllc *trllc;

                hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc);
                trh = (struct trh_hdr *)skb_push(skb, hdr_len);
                trllc = (struct trllc *)(trh+1);
                trllc->dsap = trllc->ssap = EXTENDED_SAP;
                trllc->llc = UI_CMD;
                trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00;
                trllc->ethertype = htons(type);
        } else {
                hdr_len = sizeof(struct trh_hdr);
                trh = (struct trh_hdr *)skb_push(skb, hdr_len);
        }

        trh->ac = AC;
        trh->fc = LLC_FRAME;

        if (saddr)
                memcpy(trh->saddr, saddr, dev->addr_len);
        else
                memcpy(trh->saddr, dev->dev_addr, dev->addr_len);

        /*
         *	Build the destination and then source route the frame
         */
        if (daddr) {
                memcpy(trh->daddr, daddr, dev->addr_len);
                tr_source_route(skb, trh, dev);
                return hdr_len;
        }

        return -hdr_len;
}
/*
 *	A neighbour discovery of some species (eg arp) has completed. We
 *	can now send the packet.
 */
static int tr_rebuild_header(struct sk_buff *skb)
{
        struct trh_hdr *trh = (struct trh_hdr *)skb->data;
        struct trllc *trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr));
        struct net_device *dev = skb->dev;

        /*
         *	FIXME: We don't yet support IPv6 over token rings
         */
        if (trllc->ethertype != htons(ETH_P_IP)) {
                printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n",
                       ntohs(trllc->ethertype));
                return 0;
        }

#ifdef CONFIG_INET
        if (arp_find(trh->daddr, skb))
                return 1;
#endif

        tr_source_route(skb, trh, dev);
        return 0;
}
/*
 *	Some of this is a bit hackish. We intercept RIF information
 *	used for source routing. We also grab IP directly and don't feed
 *	it via SNAP.
 */
__be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct trh_hdr *trh = (struct trh_hdr *)skb->data;
        struct trllc *trllc;
        unsigned riflen = 0;

        skb->mac.raw = skb->data;

        if (trh->saddr[0] & TR_RII)
                riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8;
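        /*
         *	struct trh_hdr is declared with room for the largest possible
         *	RIF (TR_MAXRIFLEN), so the real MAC header ends at
         *	sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen and the LLC
         *	header starts right after it.
         */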
        trllc = (struct trllc *)(skb->data + sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);

        skb_pull(skb, sizeof(struct trh_hdr) - TR_MAXRIFLEN + riflen);
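        /*
         *	The top bit of the first destination byte is the group-address
         *	bit: the all-ones address counts as broadcast, any other group
         *	address as multicast.
         */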
        if (*trh->daddr & 0x80) {
                if (!memcmp(trh->daddr, dev->broadcast, TR_ALEN))
                        skb->pkt_type = PACKET_BROADCAST;
                else
                        skb->pkt_type = PACKET_MULTICAST;
        }
        /*
         *	Note: "trh->daddr[1] & 0x00" is always zero, so this branch
         *	can never be taken as written.
         */
        else if ((trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E)) {
                skb->pkt_type = PACKET_MULTICAST;
        }
        else if (dev->flags & IFF_PROMISC) {
                if (memcmp(trh->daddr, dev->dev_addr, TR_ALEN))
                        skb->pkt_type = PACKET_OTHERHOST;
        }

        if ((skb->pkt_type != PACKET_BROADCAST) &&
            (skb->pkt_type != PACKET_MULTICAST))
                tr_add_rif_info(trh, dev);
        /*
         * Strip the SNAP header from ARP packets since we don't
         * pass them through to the 802.2/SNAP layers.
         */
        if (trllc->dsap == EXTENDED_SAP &&
            (trllc->ethertype == htons(ETH_P_IP) ||
             trllc->ethertype == htons(ETH_P_IPV6) ||
             trllc->ethertype == htons(ETH_P_ARP))) {
                skb_pull(skb, sizeof(struct trllc));
                return trllc->ethertype;
        }

        return htons(ETH_P_TR_802_2);
}
/*
 *	We try to do source routing...
 */
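/*
 *	The routing control field (rcf) carries the RIF length in the bits
 *	covered by TR_RCF_LEN_MASK, plus direction, largest-frame and
 *	broadcast bits; the route itself is the list of ring/bridge
 *	descriptors in rseg[].
 */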
void tr_source_route(struct sk_buff *skb, struct trh_hdr *trh, struct net_device *dev)
{
        int slack;
        unsigned int hash;
        struct rif_cache *entry;
        unsigned char *olddata;
        unsigned long flags;
        static const unsigned char mcast_func_addr[]
                = {0xC0,0x00,0x00,0x04,0x00,0x00};

        spin_lock_irqsave(&rif_lock, flags);
        /*
         *	Broadcasts are single route as stated in RFC 1042
         */
        if ((!memcmp(&(trh->daddr[0]), &(dev->broadcast[0]), TR_ALEN)) ||
            (!memcmp(&(trh->daddr[0]), &(mcast_func_addr[0]), TR_ALEN))) {
                trh->rcf = htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
                               | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
                trh->saddr[0] |= TR_RII;
        }
        else {
                hash = rif_hash(trh->daddr);
                /*
                 *	Walk the hash table and look for an entry
                 */
                for (entry = rif_table[hash];
                     entry && memcmp(&(entry->addr[0]), &(trh->daddr[0]), TR_ALEN);
                     entry = entry->next)
                        ;
                /*
                 *	If we found an entry we can route the frame.
                 */
                if (entry) {
#if TR_SR_DEBUG
                        printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",
                               trh->daddr[0], trh->daddr[1], trh->daddr[2],
                               trh->daddr[3], trh->daddr[4], trh->daddr[5]);
#endif
                        if (!entry->local_ring &&
                            (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) {
                                /*
                                 * Send the learned route back the way it came:
                                 * flip the direction bit and clear the
                                 * broadcast indicator bits.
                                 */
                                trh->rcf = entry->rcf;
                                memcpy(&trh->rseg[0], &entry->rseg[0],
                                       8 * sizeof(unsigned short));
                                trh->rcf ^= htons(TR_RCF_DIR_BIT);
                                trh->rcf &= htons(0x1fff);	/* Issam Chehab <ichehab@madge1.demon.co.uk> */

                                trh->saddr[0] |= TR_RII;
#if TR_SR_DEBUG
                                printk("entry found with rcf %04x\n", entry->rcf);
#endif
                        }
#if TR_SR_DEBUG
                        else {
                                printk("entry found but without rcf length, local=%02x\n",
                                       entry->local_ring);
                        }
#endif
                        entry->last_used = jiffies;
                }
                else {
                        /*
                         *	Without the information we simply have to shout
                         *	on the wire. The replies should rapidly clean this
                         *	situation up.
                         */
                        trh->rcf = htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK)
                                       | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST);
                        trh->saddr[0] |= TR_RII;
#if TR_SR_DEBUG
                        printk("no entry in rif table found - broadcasting frame\n");
#endif
                }
        }
        /* Compress the RIF here so we don't have to do it in the driver(s) */
        if (!(trh->saddr[0] & 0x80))
                slack = 18;
        else
                slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8);
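        /*
         *	struct trh_hdr always reserves the full 18-byte RIF area;
         *	"slack" is the unused tail of it. Pulling the skb and sliding
         *	the header forward leaves a contiguous, minimal MAC header
         *	for the driver.
         */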
        olddata = skb->data;
        spin_unlock_irqrestore(&rif_lock, flags);

        skb_pull(skb, slack);
        memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
}
/*
 *	We have learned some new RIF information for our source
 *	routing.
 */
static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
{
        unsigned int hash, rii_p = 0;
        unsigned long flags;
        struct rif_cache *entry;
        unsigned char saddr0;

        spin_lock_irqsave(&rif_lock, flags);
        saddr0 = trh->saddr[0];
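        /*
         *	The lookup below strips the routing-indicator bit from the
         *	source address; the saved byte is restored before returning
         *	(see the end of this function).
         */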
        /*
         *	Firstly see if the entry exists
         */
        if (trh->saddr[0] & TR_RII) {
                trh->saddr[0] &= 0x7f;
                if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2)
                        rii_p = 1;
        }

        hash = rif_hash(trh->saddr);
        for (entry = rif_table[hash];
             entry && memcmp(&(entry->addr[0]), &(trh->saddr[0]), TR_ALEN);
             entry = entry->next)
                ;
366 printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
367 trh->saddr[0],trh->saddr[1],trh->saddr[2],
368 trh->saddr[3],trh->saddr[4],trh->saddr[5],
372 * Allocate our new entry. A failure to allocate loses
373 * use the information. This is harmless.
375 * FIXME: We ought to keep some kind of cache size
376 * limiting and adjust the timers to suit.
378 entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
382 printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
383 spin_unlock_irqrestore(&rif_lock, flags);
387 memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);
388 entry->iface = dev->ifindex;
389 entry->next=rif_table[hash];
390 entry->last_used=jiffies;
391 rif_table[hash]=entry;
395 entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
396 memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short));
397 entry->local_ring = 0;
401 entry->local_ring = 1;
        else {	/* Y. Tahara added */
                /*
                 *	Update existing entries
                 */
                if (!entry->local_ring)
                        if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) &&
                            !(trh->rcf & htons(TR_RCF_BROADCAST_MASK))) {
#if TR_SR_DEBUG
                                printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
                                       trh->saddr[0], trh->saddr[1], trh->saddr[2],
                                       trh->saddr[3], trh->saddr[4], trh->saddr[5],
                                       ntohs(trh->rcf));
#endif
                                entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK);
                                memcpy(&(entry->rseg[0]), &(trh->rseg[0]),
                                       8 * sizeof(unsigned short));
                        }
                entry->last_used = jiffies;
        }
        trh->saddr[0] = saddr0; /* put the routing indicator back for tcpdump */
        spin_unlock_irqrestore(&rif_lock, flags);
}
/*
 *	Scan the cache with a timer and see what we need to throw out.
 */
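/*
 *	The timer is re-armed for the earliest pending expiry, and never more
 *	than half a timeout period ahead, so stale entries go away promptly
 *	without rescanning the table too often.
 */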
static void rif_check_expire(unsigned long dummy)
{
        int i;
        unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;

        spin_lock_irqsave(&rif_lock, flags);

        for (i = 0; i < RIF_TABLE_SIZE; i++) {
                struct rif_cache *entry, **pentry;

                pentry = rif_table + i;
                while ((entry = *pentry) != NULL) {
                        unsigned long expires
                                = entry->last_used + sysctl_tr_rif_timeout;

                        if (time_before_eq(expires, jiffies)) {
                                *pentry = entry->next;
                                kfree(entry);
                        } else {
                                pentry = &entry->next;

                                if (time_before(expires, next_interval))
                                        next_interval = expires;
                        }
                }
        }

        spin_unlock_irqrestore(&rif_lock, flags);

        mod_timer(&rif_timer, next_interval);
}
/*
 *	Generate the /proc/net information for the token ring RIF
 *	routing.
 */

#ifdef CONFIG_PROC_FS
static struct rif_cache *rif_get_idx(loff_t pos)
{
        int i;
        struct rif_cache *entry;
        loff_t off = 0;

        for (i = 0; i < RIF_TABLE_SIZE; i++)
                for (entry = rif_table[i]; entry; entry = entry->next) {
                        if (off == pos)
                                return entry;
                        ++off;
                }

        return NULL;
}
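/*
 *	Standard seq_file convention: returning SEQ_START_TOKEN from
 *	->start() makes the core call ->show() once for the header line,
 *	which is why *pos is offset by one when indexing real entries.
 */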
static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
{
        spin_lock_irq(&rif_lock);

        return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int i;
        struct rif_cache *ent = v;

        ++*pos;

        if (v == SEQ_START_TOKEN) {
                i = -1;
                goto scan;
        }

        if (ent->next)
                return ent->next;

        i = rif_hash(ent->addr);
 scan:
        while (++i < RIF_TABLE_SIZE) {
                if ((ent = rif_table[i]) != NULL)
                        return ent;
        }
        return NULL;
}
static void rif_seq_stop(struct seq_file *seq, void *v)
{
        spin_unlock_irq(&rif_lock);
}
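/*
 *	One /proc/net/tr_rif line per cache entry: interface name, learned
 *	MAC address, remaining TTL in seconds, the raw rcf and the decoded
 *	ring/bridge route descriptors ("local" for entries on the local ring).
 */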
static int rif_seq_show(struct seq_file *seq, void *v)
{
        int j, rcf_len, segment, brdgnmb;
        struct rif_cache *entry = v;

        if (v == SEQ_START_TOKEN)
                seq_puts(seq,
                         "if     TR address       TTL   rcf   routing segments\n");
        else {
                struct net_device *dev = dev_get_by_index(entry->iface);
                long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout)
                                - (long) jiffies;

                seq_printf(seq, "%s %02X:%02X:%02X:%02X:%02X:%02X %7li ",
                           dev ? dev->name : "?",
                           entry->addr[0], entry->addr[1], entry->addr[2],
                           entry->addr[3], entry->addr[4], entry->addr[5],
                           ttl / HZ);

                if (entry->local_ring)
                        seq_puts(seq, "local\n");
                else {
                        seq_printf(seq, "%04X", ntohs(entry->rcf));

                        /* Number of 16-bit route descriptors in the RIF. */
                        rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) - 2;
                        if (rcf_len)
                                rcf_len >>= 1;

                        for (j = 1; j < rcf_len; j++) {
                                if (j == 1) {
                                        segment = ntohs(entry->rseg[j - 1]) >> 4;
                                        seq_printf(seq, "  %03X", segment);
                                }
                                segment = ntohs(entry->rseg[j]) >> 4;
                                brdgnmb = ntohs(entry->rseg[j - 1]) & 0x00f;
                                seq_printf(seq, "-%01X-%03X", brdgnmb, segment);
                        }
                        seq_putc(seq, '\n');
                }

                if (dev)
                        dev_put(dev);
        }
        return 0;
}
static struct seq_operations rif_seq_ops = {
        .start = rif_seq_start,
        .next  = rif_seq_next,
        .stop  = rif_seq_stop,
        .show  = rif_seq_show,
};
static int rif_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rif_seq_ops);
}
static struct file_operations rif_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rif_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#endif
static void tr_setup(struct net_device *dev)
{
        /*
         *	Configure and register
         */
        dev->hard_header	= tr_header;
        dev->rebuild_header	= tr_rebuild_header;

        dev->type		= ARPHRD_IEEE802_TR;
        dev->hard_header_len	= TR_HLEN;
        dev->mtu		= 2000;
        dev->addr_len		= TR_ALEN;
        dev->tx_queue_len	= 100;	/* Long queues on tr */

        memset(dev->broadcast, 0xFF, TR_ALEN);

        /* New-style flags. */
        dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
}
/**
 * alloc_trdev - Register token ring device
 * @sizeof_priv: Size of additional driver-private structure to be allocated
 *	for this token ring device
 *
 * Fill in the fields of the device structure with token ring-generic values.
 *
 * Constructs a new net device, complete with a private data area of
 * size @sizeof_priv.  A 32-byte (not bit) alignment is enforced for
 * this private data area.
 */
struct net_device *alloc_trdev(int sizeof_priv)
{
        return alloc_netdev(sizeof_priv, "tr%d", tr_setup);
}
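/*
 *	A driver would typically pair this with register_netdev(). Rough
 *	sketch only; "struct my_priv" and the my_* handlers are placeholders,
 *	not taken from any real driver:
 *
 *		struct net_device *dev = alloc_trdev(sizeof(struct my_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *		dev->open = my_open;
 *		dev->stop = my_close;
 *		dev->hard_start_xmit = my_xmit;
 *		if (register_netdev(dev)) {
 *			free_netdev(dev);
 *			return -ENODEV;
 *		}
 */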
/*
 *	Called during bootup.  We don't actually have to initialise
 *	too much for this.
 */
static int __init rif_init(void)
{
        init_timer(&rif_timer);
        /* First expiry scan one full timeout from now. */
        rif_timer.expires  = jiffies + sysctl_tr_rif_timeout;
        rif_timer.data     = 0L;
        rif_timer.function = rif_check_expire;
        add_timer(&rif_timer);

        proc_net_fops_create("tr_rif", S_IRUGO, &rif_seq_fops);
        return 0;
}

module_init(rif_init);
EXPORT_SYMBOL(tr_type_trans);
EXPORT_SYMBOL(alloc_trdev);