2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 * assistance and perseverance with the testing of this driver.
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
52 * 06/02/01 - Clean up, copy skb for small packets
54 * 06/22/01 - Add EISR error handling routines
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
80 #define OLYMPIC_DEBUG 0
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/errno.h>
86 #include <linux/timer.h>
88 #include <linux/ioport.h>
89 #include <linux/string.h>
90 #include <linux/proc_fs.h>
91 #include <linux/ptrace.h>
92 #include <linux/skbuff.h>
93 #include <linux/interrupt.h>
94 #include <linux/delay.h>
95 #include <linux/netdevice.h>
96 #include <linux/trdevice.h>
97 #include <linux/stddef.h>
98 #include <linux/init.h>
99 #include <linux/pci.h>
100 #include <linux/spinlock.h>
101 #include <linux/bitops.h>
102 #include <linux/jiffies.h>
104 #include <net/checksum.h>
105 #include <net/net_namespace.h>
108 #include <asm/system.h>
112 /* I've got to put some intelligence into the version number so that Peter and I know
113 * which version of the code somebody has got.
114 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
115 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
117 * Official releases will only have an a.b.c version number format.
120 static char version[] =
121 "Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
123 static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
124 "Address Verification", "Neighbor Notification (Ring Poll)",
125 "Request Parameters","FDX Registration Request",
126 "FDX Duplicate Address Check", "Station registration Query Wait",
129 static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
130 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
131 "Duplicate Node Address","Request Parameters","Remove Received",
132 "Reserved", "Reserved", "No Monitor Detected for RPL",
133 "Monitor Contention failer for RPL", "FDX Protocol Error"};
135 /* Module paramters */
137 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
138 MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
140 /* Ring Speed 0,4,16,100
142 * 4,16 = Selected speed only, no autosense
143 * This allows the card to be the first on the ring
144 * and become the active monitor.
145 * 100 = Nothing at present, 100mbps is autodetected
146 * if FDX is turned on. May be implemented in the future to
147 * fail if 100mbps is not detected.
149 * WARNING: Some hubs will allow you to insert
153 static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
154 module_param_array(ringspeed, int, NULL, 0);
156 /* Packet buffer size */
158 static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
159 module_param_array(pkt_buf_sz, int, NULL, 0) ;
163 static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
164 module_param_array(message_level, int, NULL, 0) ;
166 /* Change network_monitor to receive mac frames through the arb channel.
167 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
168 * device, i.e. tr0, tr1 etc.
169 * Intended to be used to create a ring-error reporting network module
170 * i.e. it will give you the source address of beaconers on the ring
172 static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
173 module_param_array(network_monitor, int, NULL, 0);
175 static struct pci_device_id olympic_pci_tbl[] = {
176 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
177 { } /* Terminating Entry */
179 MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
182 static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
183 static int olympic_init(struct net_device *dev);
184 static int olympic_open(struct net_device *dev);
185 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
186 static int olympic_close(struct net_device *dev);
187 static void olympic_set_rx_mode(struct net_device *dev);
188 static void olympic_freemem(struct net_device *dev) ;
189 static irqreturn_t olympic_interrupt(int irq, void *dev_id);
190 static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
191 static void olympic_arb_cmd(struct net_device *dev);
192 static int olympic_change_mtu(struct net_device *dev, int mtu);
193 static void olympic_srb_bh(struct net_device *dev) ;
194 static void olympic_asb_bh(struct net_device *dev) ;
195 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197 static const struct net_device_ops olympic_netdev_ops = {
198 .ndo_open = olympic_open,
199 .ndo_stop = olympic_close,
200 .ndo_start_xmit = olympic_xmit,
201 .ndo_change_mtu = olympic_change_mtu,
202 .ndo_set_multicast_list = olympic_set_rx_mode,
203 .ndo_set_mac_address = olympic_set_mac_address,
206 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
208 struct net_device *dev ;
209 struct olympic_private *olympic_priv;
210 static int card_no = -1 ;
215 if ((i = pci_enable_device(pdev))) {
219 pci_set_master(pdev);
221 if ((i = pci_request_regions(pdev,"olympic"))) {
225 dev = alloc_trdev(sizeof(struct olympic_private)) ;
231 olympic_priv = netdev_priv(dev) ;
233 spin_lock_init(&olympic_priv->olympic_lock) ;
235 init_waitqueue_head(&olympic_priv->srb_wait);
236 init_waitqueue_head(&olympic_priv->trb_wait);
238 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
241 dev->base_addr=pci_resource_start(pdev, 0);
242 olympic_priv->olympic_card_name = pci_name(pdev);
243 olympic_priv->pdev = pdev;
244 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
245 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
246 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
250 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
251 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
253 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
255 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
256 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
257 olympic_priv->olympic_message_level = message_level[card_no] ;
258 olympic_priv->olympic_network_monitor = network_monitor[card_no];
260 if ((i = olympic_init(dev))) {
264 dev->netdev_ops = &olympic_netdev_ops;
265 SET_NETDEV_DEV(dev, &pdev->dev);
267 pci_set_drvdata(pdev,dev) ;
268 register_netdev(dev) ;
269 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
270 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
272 strcpy(proc_name,"olympic_") ;
273 strcat(proc_name,dev->name) ;
274 create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ;
275 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
280 if (olympic_priv->olympic_mmio)
281 iounmap(olympic_priv->olympic_mmio);
282 if (olympic_priv->olympic_lap)
283 iounmap(olympic_priv->olympic_lap);
287 pci_release_regions(pdev);
290 pci_disable_device(pdev);
294 static int olympic_init(struct net_device *dev)
296 struct olympic_private *olympic_priv;
297 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
299 unsigned int uaa_addr;
301 olympic_priv=netdev_priv(dev);
302 olympic_mmio=olympic_priv->olympic_mmio;
304 printk("%s \n", version);
305 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
307 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
309 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
311 if(time_after(jiffies, t + 40*HZ)) {
312 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
318 /* Needed for cardbus */
319 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
320 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
324 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
325 printk("GPR: %x\n",readw(olympic_mmio+GPR));
326 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
328 /* Aaaahhh, You have got to be real careful setting GPR, the card
329 holds the previous values from flash memory, including autosense
332 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
334 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
335 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
336 if (olympic_priv->olympic_message_level)
337 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
338 } else if (olympic_priv->olympic_ring_speed == 16) {
339 if (olympic_priv->olympic_message_level)
340 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
341 writew(GPR_16MBPS, olympic_mmio+GPR);
342 } else if (olympic_priv->olympic_ring_speed == 4) {
343 if (olympic_priv->olympic_message_level)
344 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
345 writew(0, olympic_mmio+GPR);
348 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
351 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
353 /* Solo has been paused to meet the Cardbus power
354 * specs if the adapter is cardbus. Check to
355 * see its been paused and then restart solo. The
356 * adapter should set the pause bit within 1 second.
359 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
361 while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
363 if(time_after(jiffies, t + 2*HZ)) {
364 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
368 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
371 /* start solo init */
372 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
375 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
377 if(time_after(jiffies, t + 15*HZ)) {
378 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
383 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
386 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
389 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
394 printk("init_srb(%p): ",init_srb);
396 printk("%x ",readb(init_srb+i));
400 if(readw(init_srb+6)) {
401 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
405 if (olympic_priv->olympic_message_level) {
406 if ( readb(init_srb +2) & 0x40) {
407 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
409 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
413 uaa_addr=swab16(readw(init_srb+8));
416 printk("UAA resides at %x\n",uaa_addr);
419 writel(uaa_addr,olympic_mmio+LAPA);
420 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
422 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
425 printk("adapter address: %pM\n", dev->dev_addr);
428 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
429 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
435 static int olympic_open(struct net_device *dev)
437 struct olympic_private *olympic_priv=netdev_priv(dev);
438 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
439 unsigned long flags, t;
440 int i, open_finished = 1 ;
443 DECLARE_WAITQUEUE(wait,current) ;
447 if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) {
452 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
453 printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
456 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
458 writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
460 writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
462 /* adapter is closed, so SRB is pointed to by LAPWWO */
464 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
465 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
468 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
469 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
470 printk("Before the open command \n");
473 memset_io(init_srb,0,SRB_COMMAND_SIZE);
475 writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
476 writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
478 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
479 if (olympic_priv->olympic_network_monitor)
480 writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
482 writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
484 /* Test OR of first 3 bytes as its totally possible for
485 * someone to set the first 2 bytes to be zero, although this
486 * is an error, the first byte must have bit 6 set to 1 */
488 if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
489 writeb(olympic_priv->olympic_laa[0],init_srb+12);
490 writeb(olympic_priv->olympic_laa[1],init_srb+13);
491 writeb(olympic_priv->olympic_laa[2],init_srb+14);
492 writeb(olympic_priv->olympic_laa[3],init_srb+15);
493 writeb(olympic_priv->olympic_laa[4],init_srb+16);
494 writeb(olympic_priv->olympic_laa[5],init_srb+17);
495 memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
497 writeb(1,init_srb+30);
499 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
500 olympic_priv->srb_queued=1;
502 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
503 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
507 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
508 set_current_state(TASK_INTERRUPTIBLE) ;
510 while(olympic_priv->srb_queued) {
512 if(signal_pending(current)) {
513 printk(KERN_WARNING "%s: Signal received in open.\n",
515 printk(KERN_WARNING "SISR=%x LISR=%x\n",
516 readl(olympic_mmio+SISR),
517 readl(olympic_mmio+LISR));
518 olympic_priv->srb_queued=0;
521 if (time_after(jiffies, t + 10*HZ)) {
522 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
523 olympic_priv->srb_queued=0;
526 set_current_state(TASK_INTERRUPTIBLE) ;
528 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
529 set_current_state(TASK_RUNNING) ;
530 olympic_priv->srb_queued = 0 ;
532 printk("init_srb(%p): ",init_srb);
534 printk("%02x ",readb(init_srb+i));
538 /* If we get the same return response as we set, the interrupt wasn't raised and the open
542 switch (resp = readb(init_srb+2)) {
543 case OLYMPIC_CLEAR_RET_CODE:
544 printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
550 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
551 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
556 err = readb(init_srb+7);
558 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
559 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
560 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
562 printk(KERN_WARNING "%s: %s - %s\n", dev->name,
563 open_maj_error[(err & 0xf0) >> 4],
564 open_min_error[(err & 0x0f)]);
569 printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
570 dev->name, olympic_priv->olympic_laa);
574 printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
578 } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
580 if (readb(init_srb+18) & (1<<3))
581 if (olympic_priv->olympic_message_level)
582 printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
584 if (readb(init_srb+18) & (1<<1))
585 olympic_priv->olympic_ring_speed = 100 ;
586 else if (readb(init_srb+18) & 1)
587 olympic_priv->olympic_ring_speed = 16 ;
589 olympic_priv->olympic_ring_speed = 4 ;
591 if (olympic_priv->olympic_message_level)
592 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
594 olympic_priv->asb = swab16(readw(init_srb+8));
595 olympic_priv->srb = swab16(readw(init_srb+10));
596 olympic_priv->arb = swab16(readw(init_srb+12));
597 olympic_priv->trb = swab16(readw(init_srb+16));
599 olympic_priv->olympic_receive_options = 0x01 ;
600 olympic_priv->olympic_copy_all_options = 0 ;
604 writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
606 writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */
608 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
612 skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
618 olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
619 skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
620 olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
621 olympic_priv->rx_ring_skb[i]=skb;
625 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
629 olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
630 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
631 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
632 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
633 writew(i, olympic_mmio+RXDESCQCNT);
635 olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
636 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
637 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
638 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
640 olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
641 olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
643 writew(i, olympic_mmio+RXSTATQCNT);
646 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
647 printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
648 printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
649 printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
650 printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
652 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
653 printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
654 olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
657 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
660 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
661 printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
662 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
665 writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
669 writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
670 for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
671 olympic_priv->olympic_tx_ring[i].buffer=cpu_to_le32(0xdeadbeef);
673 olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
674 olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
675 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
676 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
677 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
678 writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
680 olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
681 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
682 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
683 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
684 writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
686 olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
687 olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
689 writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
690 writel(0,olympic_mmio+EISR) ;
691 writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
692 writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
695 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
696 printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
699 if (olympic_priv->olympic_network_monitor) {
703 oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
704 opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
706 for (i = 0; i < 6; i++)
707 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
708 printk("%s: Node Address: %pM\n", dev->name, addr);
709 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
710 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
711 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
712 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
713 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
715 for (i = 0; i < 6; i++)
716 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
717 printk("%s: NAUN Address: %pM\n", dev->name, addr);
720 netif_start_queue(dev);
724 free_irq(dev->irq, dev);
729 * When we enter the rx routine we do not know how many frames have been
730 * queued on the rx channel. Therefore we start at the next rx status
731 * position and travel around the receive ring until we have completed
734 * This means that we may process the frame before we receive the end
735 * of frame interrupt. This is why we always test the status instead
736 * of blindly processing the next frame.
738 * We also remove the last 4 bytes from the packet as well, these are
739 * just token ring trailer info and upset protocols that don't check
740 * their own length, i.e. SNA.
743 static void olympic_rx(struct net_device *dev)
745 struct olympic_private *olympic_priv=netdev_priv(dev);
746 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
747 struct olympic_rx_status *rx_status;
748 struct olympic_rx_desc *rx_desc ;
749 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
750 struct sk_buff *skb, *skb2;
753 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
755 while (rx_status->status_buffercnt) {
756 u32 l_status_buffercnt;
758 olympic_priv->rx_status_last_received++ ;
759 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
761 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
763 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
764 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
765 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
766 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
769 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
771 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
772 if(l_status_buffercnt & 0xC0000000) {
773 if (l_status_buffercnt & 0x3B000000) {
774 if (olympic_priv->olympic_message_level) {
775 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
776 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
777 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
778 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
779 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
780 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
781 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
782 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
783 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
784 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
786 olympic_priv->rx_ring_last_received += i ;
787 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
788 dev->stats.rx_errors++;
791 if (buffer_cnt == 1) {
792 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
794 skb = dev_alloc_skb(length) ;
798 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
799 dev->stats.rx_dropped++;
800 /* Update counters even though we don't transfer the frame */
801 olympic_priv->rx_ring_last_received += i ;
802 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
804 /* Optimise based upon number of buffers used.
805 If only one buffer is used we can simply swap the buffers around.
806 If more than one then we must use the new buffer and copy the information
807 first. Ideally all frames would be in a single buffer, this can be tuned by
808 altering the buffer size. If the length of the packet is less than
809 1500 bytes we're going to copy it over anyway to stop packets getting
810 dropped from sockets with buffers smaller than our pkt_buf_sz. */
813 olympic_priv->rx_ring_last_received++ ;
814 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
815 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
817 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
819 pci_unmap_single(olympic_priv->pdev,
820 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
821 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
822 skb_put(skb2,length-4);
823 skb2->protocol = tr_type_trans(skb2,dev);
824 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
825 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
826 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
827 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
828 cpu_to_le32(olympic_priv->pkt_buf_sz);
829 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
832 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
833 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
834 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
835 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
836 skb_put(skb,length - 4),
838 pci_dma_sync_single_for_device(olympic_priv->pdev,
839 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
840 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
841 skb->protocol = tr_type_trans(skb,dev) ;
845 do { /* Walk the buffers */
846 olympic_priv->rx_ring_last_received++ ;
847 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
848 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
849 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
850 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
851 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
852 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
853 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
854 skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
855 skb_put(skb, cpy_length),
857 pci_dma_sync_single_for_device(olympic_priv->pdev,
858 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
859 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
861 skb_trim(skb,skb->len-4) ;
862 skb->protocol = tr_type_trans(skb,dev);
865 dev->stats.rx_packets++ ;
866 dev->stats.rx_bytes += length ;
867 } /* if skb == null */
868 } /* If status & 0x3b */
870 } else { /*if buffercnt & 0xC */
871 olympic_priv->rx_ring_last_received += i ;
872 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
875 rx_status->fragmentcnt_framelen = 0 ;
876 rx_status->status_buffercnt = 0 ;
877 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
879 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
/*
 * olympic_freemem - release all receive skbs and DMA mappings.
 *
 * Walks the RX ring freeing any skb the driver still owns and unmapping
 * its DMA buffer, then unmaps the rx/tx descriptor and status rings
 * themselves.  Called from the close path (see olympic_close), hence
 * dev_kfree_skb_irq() rather than dev_kfree_skb().
 */
884 static void olympic_freemem(struct net_device *dev)
886 struct olympic_private *olympic_priv=netdev_priv(dev);
889 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
890 if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
891 dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
892 olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
/* 0xdeadbeef is the driver's sentinel for "descriptor already unmapped"
 * (see the tx completion path), so only live buffers are unmapped. */
894 if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
895 pci_unmap_single(olympic_priv->pdev,
896 le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
897 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
/* Ring size is a power of two, so advance with a mask, not a modulo. */
899 olympic_priv->rx_status_last_received++;
900 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
/* Finally unmap the four ring areas (rx/tx status and descriptors). */
903 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
904 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
905 pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
906 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
908 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
909 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
910 pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
911 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
/*
 * olympic_interrupt - shared interrupt handler.
 *
 * Reads SISR, bails out early if the interrupt is not ours, then
 * dispatches on the individual status bits: SRB replies, TX completion,
 * RX status, adapter check, ASB/ARB/TRB events.  Runs under the
 * per-adapter spinlock (taken without irqsave - we are already in irq
 * context here).
 */
916 static irqreturn_t olympic_interrupt(int irq, void *dev_id)
918 struct net_device *dev= (struct net_device *)dev_id;
919 struct olympic_private *olympic_priv=netdev_priv(dev);
920 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
922 u8 __iomem *adapter_check_area ;
925 * Read sisr but don't reset it yet.
926 * The indication bit may have been set but the interrupt latch
927 * bit may not be set, so we'd lose the interrupt later.
929 sisr=readl(olympic_mmio+SISR) ;
930 if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
932 sisr=readl(olympic_mmio+SISR_RR) ; /* Read & Reset sisr */
934 spin_lock(&olympic_priv->olympic_lock);
936 /* Hotswap gives us this on removal */
937 if (sisr == 0xffffffff) {
938 printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
939 spin_unlock(&olympic_priv->olympic_lock) ;
943 if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
944 SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
946 /* If we ever get this the adapter is seriously dead. Only a reset is going to
947 * bring it back to life. We're talking pci bus errors and such like :( */
948 if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
949 printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
950 printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
951 printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
952 printk(KERN_ERR "or the linux-tr mailing list.\n") ;
/* Release anything sleeping on an SRB so close() etc. do not hang. */
953 wake_up_interruptible(&olympic_priv->srb_wait);
954 spin_unlock(&olympic_priv->olympic_lock) ;
/* srb_queued==1 means a task is sleeping on srb_wait;
 * srb_queued==2 means the reply must be handled here via the bh. */
958 if(sisr & SISR_SRB_REPLY) {
959 if(olympic_priv->srb_queued==1) {
960 wake_up_interruptible(&olympic_priv->srb_wait);
961 } else if (olympic_priv->srb_queued==2) {
962 olympic_srb_bh(dev) ;
964 olympic_priv->srb_queued=0;
965 } /* SISR_SRB_REPLY */
967 /* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
968 we get all tx completions. */
969 if (sisr & SISR_TX1_EOF) {
970 while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
971 olympic_priv->tx_ring_last_status++;
972 olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
973 olympic_priv->free_tx_ring_entries++;
/* Account the frame, unmap its DMA buffer, free the skb and mark
 * the descriptor with the 0xdeadbeef "unmapped" sentinel. */
974 dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
975 dev->stats.tx_packets++ ;
976 pci_unmap_single(olympic_priv->pdev,
977 le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
978 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
979 dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
980 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=cpu_to_le32(0xdeadbeef);
981 olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
983 netif_wake_queue(dev);
986 if (sisr & SISR_RX_STATUS) {
988 } /* SISR_RX_STATUS */
/* Fatal hardware self-check: dump the 8-byte check area and leave
 * the queue stopped; only a reset recovers the adapter. */
990 if (sisr & SISR_ADAPTER_CHECK) {
991 netif_stop_queue(dev);
992 printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
993 writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
994 adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
995 printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
996 spin_unlock(&olympic_priv->olympic_lock) ;
998 } /* SISR_ADAPTER_CHECK */
1000 if (sisr & SISR_ASB_FREE) {
1001 /* Wake up anything that is waiting for the asb response */
1002 if (olympic_priv->asb_queued) {
1003 olympic_asb_bh(dev) ;
1005 } /* SISR_ASB_FREE */
1007 if (sisr & SISR_ARB_CMD) {
1008 olympic_arb_cmd(dev) ;
1009 } /* SISR_ARB_CMD */
1011 if (sisr & SISR_TRB_REPLY) {
1012 /* Wake up anything that is waiting for the trb response */
1013 if (olympic_priv->trb_queued) {
1014 wake_up_interruptible(&olympic_priv->trb_wait);
1016 olympic_priv->trb_queued = 0 ;
1017 } /* SISR_TRB_REPLY */
1019 if (sisr & SISR_RX_NOBUF) {
1020 /* According to the documentation, we don't have to do anything, but trapping it keeps it out of
1021 /var/log/messages. */
1022 } /* SISR_RX_NOBUF */
1024 printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
1025 printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
1026 } /* One if the interrupts we want */
/* Re-enable the master interrupt before releasing the lock. */
1027 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
1029 spin_unlock(&olympic_priv->olympic_lock) ;
/*
 * olympic_xmit - hard_start_xmit handler.
 *
 * Queues one skb on the TX descriptor ring under the adapter spinlock.
 * The queue is stopped up front and only re-woken once the frame has
 * been enqueued, which serialises against the completion path in the
 * interrupt handler.
 *
 * NOTE(review): the visible listing shows no explicit return
 * statements (NETDEV_TX_OK / NETDEV_TX_BUSY) on either path - likely
 * elided from this excerpt; confirm against the full source.
 */
1033 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1035 struct olympic_private *olympic_priv=netdev_priv(dev);
1036 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1037 unsigned long flags ;
1039 spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1041 netif_stop_queue(dev);
1043 if(olympic_priv->free_tx_ring_entries) {
/* Map the frame for DMA and hand its bus address to the descriptor
 * (hardware descriptors are little-endian, hence cpu_to_le32). */
1044 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1045 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
/* Bit 31 of status_length marks the descriptor as owned/valid. */
1046 olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1047 olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1048 olympic_priv->free_tx_ring_entries--;
1050 olympic_priv->tx_ring_free++;
1051 olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
/* Toggle the TXENQ_1 valid bit (bit 15) and enqueue one frame. */
1052 writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1053 netif_wake_queue(dev);
1054 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1057 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
/*
 * olympic_close - ndo_stop handler.
 *
 * Issues SRB_CLOSE_ADAPTER and sleeps (up to 60s, interruptibly) until
 * the interrupt handler clears srb_queued, then tears down the rings
 * via olympic_freemem(), resets the FIFOs/busmaster logic and releases
 * the irq.
 */
1064 static int olympic_close(struct net_device *dev)
1066 struct olympic_private *olympic_priv=netdev_priv(dev);
1067 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
1068 unsigned long t,flags;
1070 DECLARE_WAITQUEUE(wait,current) ;
1072 netif_stop_queue(dev);
/* Point LAPA at the SRB and build a CPU pointer to it in shared RAM. */
1074 writel(olympic_priv->srb,olympic_mmio+LAPA);
1075 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1077 writeb(SRB_CLOSE_ADAPTER,srb+0);
1079 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
/* Register on the wait queue BEFORE kicking the command so the reply
 * interrupt cannot be missed. */
1081 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
1082 set_current_state(TASK_INTERRUPTIBLE) ;
1084 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
1085 olympic_priv->srb_queued=1;
1087 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1088 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1090 while(olympic_priv->srb_queued) {
1092 t = schedule_timeout_interruptible(60*HZ);
/* A pending signal aborts the wait; log the registers for diagnosis. */
1094 if(signal_pending(current)) {
1095 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
1096 printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
1097 olympic_priv->srb_queued=0;
1102 printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
1104 olympic_priv->srb_queued=0;
1106 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
1108 olympic_priv->rx_status_last_received++;
1109 olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1111 olympic_freemem(dev) ;
1113 /* reset tx/rx fifo's and busmaster logic */
1115 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1117 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
/* Debug dump of the SRB contents. */
1122 printk("srb(%p): ",srb);
1124 printk("%x ",readb(srb+i));
1128 free_irq(dev->irq,dev);
/*
 * olympic_set_rx_mode - ndo_set_rx_mode handler.
 *
 * First reconciles the adapter's copy.all.options with IFF_PROMISC,
 * issuing SRB_MODIFY_RECEIVE_OPTIONS only when something changed.
 * Otherwise it ORs the lower four bytes of every multicast address
 * into a functional-address mask and programs it with
 * SRB_SET_FUNC_ADDRESS.  Replies are handled by olympic_srb_bh
 * (srb_queued = 2 - this path cannot sleep).
 */
1134 static void olympic_set_rx_mode(struct net_device *dev)
1136 struct olympic_private *olympic_priv = netdev_priv(dev);
1137 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1140 struct dev_mc_list *dmi ;
1141 unsigned char dev_mc_address[4] ;
1144 writel(olympic_priv->srb,olympic_mmio+LAPA);
1145 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1146 options = olympic_priv->olympic_copy_all_options;
1148 if (dev->flags&IFF_PROMISC)
1153 /* Only issue the srb if there is a change in options */
1155 if ((options ^ olympic_priv->olympic_copy_all_options)) {
1157 /* Now to issue the srb command to alter the copy.all.options */
1159 writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1161 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1163 writeb(olympic_priv->olympic_receive_options,srb+4);
1164 writeb(options,srb+5);
1166 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1168 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1170 olympic_priv->olympic_copy_all_options = options ;
1175 /* Set the functional addresses we need for multicast */
1177 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
/* Token ring functional addresses: fold bytes 2-5 of each multicast
 * address into the 4-byte mask. */
1179 for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1180 dev_mc_address[0] |= dmi->dmi_addr[2] ;
1181 dev_mc_address[1] |= dmi->dmi_addr[3] ;
1182 dev_mc_address[2] |= dmi->dmi_addr[4] ;
1183 dev_mc_address[3] |= dmi->dmi_addr[5] ;
1186 writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1188 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1192 writeb(dev_mc_address[0],srb+6);
1193 writeb(dev_mc_address[1],srb+7);
1194 writeb(dev_mc_address[2],srb+8);
1195 writeb(dev_mc_address[3],srb+9);
1197 olympic_priv->srb_queued = 2 ;
1198 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
/*
 * olympic_srb_bh - bottom half for asynchronous SRB replies.
 *
 * Called from the interrupt handler when srb_queued == 2 (the issuer
 * could not sleep).  Dispatches on the command byte at srb[0] and then
 * on the return code at srb[2], logging the outcome.  Purely
 * diagnostic: no error here changes driver state.
 */
1202 static void olympic_srb_bh(struct net_device *dev)
1204 struct olympic_private *olympic_priv = netdev_priv(dev);
1205 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1208 writel(olympic_priv->srb,olympic_mmio+LAPA);
1209 srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1211 switch (readb(srb)) {
1213 /* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1214 * At some point we should do something if we get an error, such as
1215 * resetting the IFF_PROMISC flag in dev
1218 case SRB_MODIFY_RECEIVE_OPTIONS:
1219 switch (readb(srb+2)) {
1221 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1224 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1227 if (olympic_priv->olympic_message_level)
1228 printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1230 } /* switch srb[2] */
1233 /* SRB_SET_GROUP_ADDRESS - Multicast group setting
1236 case SRB_SET_GROUP_ADDRESS:
1237 switch (readb(srb+2)) {
1241 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1244 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1247 printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1249 case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1250 printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1253 printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1257 } /* switch srb[2] */
1260 /* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1263 case SRB_RESET_GROUP_ADDRESS:
1264 switch (readb(srb+2)) {
1268 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1271 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1273 case 0x39: /* Must deal with this if individual multicast addresses used */
1274 printk(KERN_INFO "%s: Group address not found \n",dev->name);
1278 } /* switch srb[2] */
1282 /* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1285 case SRB_SET_FUNC_ADDRESS:
1286 switch (readb(srb+2)) {
1288 if (olympic_priv->olympic_message_level)
1289 printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1292 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1295 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1299 } /* switch srb[2] */
1302 /* SRB_READ_LOG - Read and reset the adapter error counters
1306 switch (readb(srb+2)) {
1308 if (olympic_priv->olympic_message_level)
1309 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1312 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1315 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1318 } /* switch srb[2] */
1321 /* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1323 case SRB_READ_SR_COUNTERS:
1324 switch (readb(srb+2)) {
1326 if (olympic_priv->olympic_message_level)
1327 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1330 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1333 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1337 } /* switch srb[2] */
1341 printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1343 } /* switch srb[0] */
1347 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1349 struct sockaddr *saddr = addr ;
1350 struct olympic_private *olympic_priv = netdev_priv(dev);
1352 if (netif_running(dev)) {
1353 printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1357 memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1359 if (olympic_priv->olympic_message_level) {
1360 printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1361 olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1362 olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1363 olympic_priv->olympic_laa[5]);
/*
 * olympic_arb_cmd - handle adapter-initiated (ARB) requests.
 *
 * Two ARB commands are handled: ARB_RECEIVE_DATA (a MAC frame has been
 * received into shared RAM - copy it out, push it up the stack and
 * acknowledge via the ASB) and ARB_LAN_CHANGE_STATUS (ring status
 * change - log the differences and, on counter overflow, issue the
 * corresponding READ.LOG / READ.SR.COUNTERS SRBs).  Called from the
 * interrupt handler.  Note: the ARB area is big-endian, hence the
 * swab16() on every 16-bit read.
 */
1369 static void olympic_arb_cmd(struct net_device *dev)
1371 struct olympic_private *olympic_priv = netdev_priv(dev);
1372 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1373 u8 __iomem *arb_block, *asb_block, *srb ;
1375 u16 frame_len, buffer_len ;
1376 struct sk_buff *mac_frame ;
1377 u8 __iomem *buf_ptr ;
1378 u8 __iomem *frame_data ;
1380 u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
1384 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1385 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1386 srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
1388 if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
1390 header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
1391 frame_len = swab16(readw(arb_block + 10)) ;
1393 buff_off = swab16(readw(arb_block + 6)) ;
1395 buf_ptr = olympic_priv->olympic_lap + buff_off ;
/* Debug dump of the first receive buffer. */
1400 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1402 for (i=0 ; i < 14 ; i++) {
1403 printk("Loc %d = %02x\n",i,readb(frame_data + i));
1406 printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1409 mac_frame = dev_alloc_skb(frame_len) ;
1411 printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
1415 /* Walk the buffer chain, creating the frame */
1418 frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1419 buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1420 memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
1421 next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1422 } while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + swab16(next_ptr)));
1424 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1426 if (olympic_priv->olympic_network_monitor) {
1427 struct trh_hdr *mac_hdr;
1428 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
1429 mac_hdr = tr_hdr(mac_frame);
1430 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
1431 dev->name, mac_hdr->daddr);
1432 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
1433 dev->name, mac_hdr->saddr);
1435 netif_rx(mac_frame);
1438 /* Now tell the card we have dealt with the received frame */
1440 /* Set LISR Bit 1 */
1441 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
1443 /* Is the ASB free ? */
1445 if (readb(asb_block + 2) != 0xff) {
/* ASB busy: request it and let olympic_asb_bh finish the ack. */
1446 olympic_priv->asb_queued = 1 ;
1447 writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1449 /* Drop out and wait for the bottom half to be run */
1452 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1453 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1454 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1455 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1457 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1459 olympic_priv->asb_queued = 2 ;
1463 } else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
1464 lan_status = swab16(readw(arb_block+6));
1465 fdx_prot_error = readb(arb_block+8) ;
1467 /* Issue ARB Free */
1468 writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
/* XOR with the cached status so only the changed bits are reported. */
1470 lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
1472 if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1473 if (lan_status_diff & LSC_LWF)
1474 printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1475 if (lan_status_diff & LSC_ARW)
1476 printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1477 if (lan_status_diff & LSC_FPE)
1478 printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1479 if (lan_status_diff & LSC_RR)
1480 printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1482 /* Adapter has been closed by the hardware */
1484 /* reset tx/rx fifo's and busmaster logic */
1486 writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1488 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1489 netif_stop_queue(dev);
1490 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1491 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
1492 } /* If serious error */
1494 if (olympic_priv->olympic_message_level) {
1495 if (lan_status_diff & LSC_SIG_LOSS)
1496 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
1497 if (lan_status_diff & LSC_HARD_ERR)
1498 printk(KERN_INFO "%s: Beaconing \n",dev->name);
1499 if (lan_status_diff & LSC_SOFT_ERR)
1500 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
1501 if (lan_status_diff & LSC_TRAN_BCN)
1502 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1503 if (lan_status_diff & LSC_SS)
1504 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
1505 if (lan_status_diff & LSC_RING_REC)
1506 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1507 if (lan_status_diff & LSC_FDX_MODE)
1508 printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
/* Counter overflow: ask the adapter for its error log. */
1511 if (lan_status_diff & LSC_CO) {
1513 if (olympic_priv->olympic_message_level)
1514 printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
1516 /* Issue READ.LOG command */
1518 writeb(SRB_READ_LOG, srb);
1520 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1525 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1527 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1531 if (lan_status_diff & LSC_SR_CO) {
1533 if (olympic_priv->olympic_message_level)
1534 printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1536 /* Issue a READ.SR.COUNTERS */
1538 writeb(SRB_READ_SR_COUNTERS,srb);
1540 writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1543 olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1545 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1549 olympic_priv->olympic_lan_status = lan_status ;
1551 } /* Lan.change.status */
1553 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
/*
 * olympic_asb_bh - deferred ASB handling for received MAC frames.
 *
 * Two-phase state machine driven by asb_queued: phase 1 (the ASB was
 * busy when olympic_arb_cmd wanted it) sends the receive-data
 * acknowledgement now that the ASB is free; phase 2 checks the return
 * code of a previously sent acknowledgement and logs any error.
 */
1556 static void olympic_asb_bh(struct net_device *dev)
1558 struct olympic_private *olympic_priv = netdev_priv(dev);
1559 u8 __iomem *arb_block, *asb_block ;
1561 arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1562 asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1564 if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
1566 writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1567 writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1568 writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1569 writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1571 writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1572 olympic_priv->asb_queued = 2 ;
/* Phase 2: inspect the adapter's return code for the reply above. */
1577 if (olympic_priv->asb_queued == 2) {
1578 switch (readb(asb_block+2)) {
1580 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1583 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1586 /* Valid response, everything should be ok again */
1589 printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1593 olympic_priv->asb_queued = 0 ;
1596 static int olympic_change_mtu(struct net_device *dev, int mtu)
1598 struct olympic_private *olympic_priv = netdev_priv(dev);
1601 if (olympic_priv->olympic_ring_speed == 4)
1612 olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
/*
 * olympic_proc_info - /proc read handler for the per-adapter entry.
 *
 * Formats the adapter address table and token ring parameters table
 * (both live in adapter shared RAM, read via readb/readw) into the
 * caller-supplied buffer.  16-bit fields are big-endian on the
 * adapter, hence the swab16() on every readw().
 */
1617 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
1619 struct net_device *dev = (struct net_device *)data ;
1620 struct olympic_private *olympic_priv=netdev_priv(dev);
1621 u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1622 u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
1631 size = sprintf(buffer,
1632 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1633 size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
/* Copy MAC-style fields byte-wise out of I/O memory before printing;
 * %pM cannot dereference __iomem pointers directly. */
1636 for (i = 0 ; i < 6 ; i++)
1637 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
1639 size += sprintf(buffer+size, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1641 dev->dev_addr, addr,
1642 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1643 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1644 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1645 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
1647 size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1649 size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
1652 for (i = 0 ; i < 6 ; i++)
1653 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
1654 for (i = 0 ; i < 6 ; i++)
1655 addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
1657 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1659 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1660 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1661 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1662 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1664 swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1665 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1666 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
1668 size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1671 for (i = 0 ; i < 6 ; i++)
1672 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
1673 size += sprintf(buffer+size, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1675 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1676 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1677 swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
1678 swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
1679 swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1680 swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
1682 size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
1685 for (i = 0 ; i < 6 ; i++)
1686 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
1687 size += sprintf(buffer+size, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1689 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1690 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1692 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1693 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1694 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1695 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
/* Standard proc_read windowing: hand back only the requested slice. */
1703 *start=buffer+(offset-begin); /* Start of wanted data */
1704 len-=(offset-begin); /* Start slop */
1706 len=length; /* Ending slop */
1710 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1712 struct net_device *dev = pci_get_drvdata(pdev) ;
1713 struct olympic_private *olympic_priv=netdev_priv(dev);
1715 if (olympic_priv->olympic_network_monitor) {
1716 char proc_name[20] ;
1717 strcpy(proc_name,"olympic_") ;
1718 strcat(proc_name,dev->name) ;
1719 remove_proc_entry(proc_name,init_net.proc_net);
1721 unregister_netdev(dev) ;
1722 iounmap(olympic_priv->olympic_mmio) ;
1723 iounmap(olympic_priv->olympic_lap) ;
1724 pci_release_regions(pdev) ;
1725 pci_set_drvdata(pdev,NULL) ;
1729 static struct pci_driver olympic_driver = {
1731 .id_table = olympic_pci_tbl,
1732 .probe = olympic_probe,
1733 .remove = __devexit_p(olympic_remove_one),
1736 static int __init olympic_pci_init(void)
1738 return pci_register_driver(&olympic_driver) ;
1741 static void __exit olympic_pci_cleanup(void)
1743 pci_unregister_driver(&olympic_driver) ;
/* Register the module's entry/exit points and license with the kernel. */
1747 module_init(olympic_pci_init) ;
1748 module_exit(olympic_pci_cleanup) ;
1750 MODULE_LICENSE("GPL");