1 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2 munged into HPPA boxen.
4 This driver is based upon 82596.c, original credits are below...
5 but there were too many hoops which HP wants jumped through to
6 keep this code in there in a sane manner.
8 3 primary sources of the mess --
9 1) hppa needs *lots* of cacheline flushing to keep this kind of
12 2) The 82596 needs to see all of its pointers as their physical
13 address. Thus virt_to_bus/bus_to_virt are *everywhere*.
15 3) The implementation HP is using seems to be significantly pickier
16 about when and how the command and RX units are started. some
17 command ordering was changed.
19 Examination of the mach driver leads one to believe that there
20 might be a saner way to pull this off... anyone who feels like a
21 full rewrite can be my guest.
23 Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
25 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
26 03/02/2000 changes for better/correct(?) cache-flushing (deller)
29 /* 82596.c: A generic 82596 ethernet driver for linux. */
32 Written 1994 by Mark Evans.
33 This driver is for the Apricot 82596 bus-master interface
35 Modularised 12/94 Mark Evans
38 Modified to support the 82596 ethernet chips on 680x0 VME boards.
39 by Richard Hirst <richard@sleepie.demon.co.uk>
42 980825: Changed to receive directly in to sk_buffs which are
43 allocated at open() time. Eliminates copy on incoming frames
44 (small ones are still copied). Shared data now held in a
45 non-cached page, so we can run on 68060 in copyback mode.
48 * look at deferring rx frames rather than discarding (as per tulip)
49 * handle tx ring full as per tulip
50 * performance test to tune rx_copybreak
52 Most of my modifications relate to the braindead big-endian
53 implementation by Intel. When the i596 is operating in
54 'big-endian' mode, it thinks a 32 bit value of 0x12345678
55 should be stored as 0x56781234. This is a real pain, when
56 you have linked lists which are shared by the 680x0 and the
60 Written 1993 by Donald Becker.
61 Copyright 1993 United States Government as represented by the Director,
62 National Security Agency. This software may only be used and distributed
63 according to the terms of the GNU General Public License as modified by SRC,
64 incorporated herein by reference.
66 The author may be reached as becker@scyld.com, or C/O
67 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/ptrace.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/slab.h>
78 #include <linux/interrupt.h>
79 #include <linux/delay.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/init.h>
84 #include <linux/pci.h>
85 #include <linux/types.h>
86 #include <linux/bitops.h>
89 #include <asm/pgtable.h>
92 #include <asm/cache.h>
93 #include <asm/parisc-device.h>
95 #define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
100 #define DEB_INIT 0x0001
101 #define DEB_PROBE 0x0002
102 #define DEB_SERIOUS 0x0004
103 #define DEB_ERRORS 0x0008
104 #define DEB_MULTI 0x0010
105 #define DEB_TDR 0x0020
106 #define DEB_OPEN 0x0040
107 #define DEB_RESET 0x0080
108 #define DEB_ADDCMD 0x0100
109 #define DEB_STATUS 0x0200
110 #define DEB_STARTTX 0x0400
111 #define DEB_RXADDR 0x0800
112 #define DEB_TXADDR 0x1000
113 #define DEB_RXFRAME 0x2000
114 #define DEB_INTS 0x4000
115 #define DEB_STRUCT 0x8000
116 #define DEB_ANY 0xffff
119 #define DEB(x,y) if (i596_debug & (x)) { y; }
122 #define CHECK_WBACK(addr,len) \
123 do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
125 #define CHECK_INV(addr,len) \
126 do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
128 #define CHECK_WBACK_INV(addr,len) \
129 do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
132 #define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
133 #define PA_CPU_PORT_L_ACCESS 4
134 #define PA_CHANNEL_ATTENTION 8
138 * Define various macros for Channel Attention, word swapping etc., dependent
139 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
143 #define WSWAPrfd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
144 #define WSWAPrbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
145 #define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
146 #define WSWAPscb(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
147 #define WSWAPcmd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
148 #define WSWAPtbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
149 #define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
150 #define ISCP_BUSY 0x00010000
151 #define MACH_IS_APRICOT 0
153 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
154 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
155 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
156 #define WSWAPscb(x) ((struct i596_scb *)(x))
157 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
158 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
159 #define WSWAPchar(x) ((char *)(x))
160 #define ISCP_BUSY 0x0001
161 #define MACH_IS_APRICOT 1
165 * The MPU_PORT command allows direct access to the 82596. With PORT access
166 * the following commands are available (p5-18). The 32-bit port command
167 * must be word-swapped with the most significant word written first.
168 * This only applies to VME boards.
170 #define PORT_RESET 0x00 /* reset 82596 */
171 #define PORT_SELFTEST 0x01 /* selftest */
172 #define PORT_ALTSCP 0x02 /* alternate SCB address */
173 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
175 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
177 MODULE_AUTHOR("Richard Hirst");
178 MODULE_DESCRIPTION("i82596 driver");
179 MODULE_LICENSE("GPL");
180 MODULE_PARM(i596_debug, "i");
181 MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
183 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
184 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
186 static int rx_copybreak = 100;
188 #define MAX_DRIVERS 4 /* max count of drivers */
190 #define PKT_BUF_SZ 1536
191 #define MAX_MC_CNT 64
193 #define I596_NULL ((u32)0xffffffff)
195 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
196 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
197 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
199 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
202 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
203 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
206 #define STAT_C 0x8000 /* Set to 0 after execution */
207 #define STAT_B 0x4000 /* Command being executed */
208 #define STAT_OK 0x2000 /* Command executed ok */
209 #define STAT_A 0x1000 /* Command aborted */
211 #define CUC_START 0x0100
212 #define CUC_RESUME 0x0200
213 #define CUC_SUSPEND 0x0300
214 #define CUC_ABORT 0x0400
215 #define RX_START 0x0010
216 #define RX_RESUME 0x0020
217 #define RX_SUSPEND 0x0030
218 #define RX_ABORT 0x0040
222 #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
226 unsigned short porthi;
227 unsigned short portlo;
232 #define SIZE_MASK 0x3fff
239 u32 cache_pad[5]; /* Total 32 bytes... */
242 /* The command structure has two 'next' pointers; v_next is the address of
243 * the next command as seen by the CPU, b_next is the address of the next
244 * command as seen by the 82596. The b_next pointer, as used by the 82596
245 * always references the status field of the next command, rather than the
246 * v_next field, because the 82596 is unaware of v_next. It may seem more
247 * logical to put v_next at the end of the structure, but we cannot do that
248 * because the 82596 expects other fields to be there, depending on command
253 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
254 unsigned short status;
255 unsigned short command;
256 dma_addr_t b_next; /* Address from i596 viewpoint */
264 struct sk_buff *skb; /* So we can free it after tx */
267 u32 cache_pad[6]; /* Total 64 bytes... */
269 u32 cache_pad[1]; /* Total 32 bytes... */
275 unsigned short status;
282 char mc_addrs[MAX_MC_CNT*6];
292 char i596_config[16];
298 dma_addr_t b_next; /* Address from i596 viewpoint */
300 unsigned short count;
302 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
303 struct i596_rfd *v_prev;
305 u32 cache_pad[2]; /* Total 32 bytes... */
311 unsigned short count;
312 unsigned short zero1;
314 dma_addr_t b_data; /* Address from i596 viewpoint */
316 unsigned short zero2;
319 struct i596_rbd *v_next;
320 dma_addr_t b_addr; /* This rbd addr from i596 view */
321 unsigned char *v_data; /* Address from CPUs viewpoint */
322 /* Total 32 bytes... */
328 /* These values as chosen so struct i596_private fits in one page... */
330 #define TX_RING_SIZE 32
331 #define RX_RING_SIZE 16
334 unsigned short status;
335 unsigned short command;
345 unsigned short t_off;
359 struct i596_private {
360 volatile struct i596_scp scp __attribute__((aligned(32)));
361 volatile struct i596_iscp iscp __attribute__((aligned(32)));
362 volatile struct i596_scb scb __attribute__((aligned(32)));
363 struct sa_cmd sa_cmd __attribute__((aligned(32)));
364 struct cf_cmd cf_cmd __attribute__((aligned(32)));
365 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
366 struct mc_cmd mc_cmd __attribute__((aligned(32)));
367 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
368 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
369 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
370 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
373 struct i596_rfd *rfd_head;
374 struct i596_rbd *rbd_head;
375 struct i596_cmd *cmd_tail;
376 struct i596_cmd *cmd_head;
379 struct net_device_stats stats;
387 static char init_setup[] =
389 0x8E, /* length, prefetch on */
390 0xC8, /* fifo to 8, monitor off */
391 0x80, /* don't save bad frames */
392 0x2E, /* No source address insertion, 8 byte preamble */
393 0x00, /* priority and backoff defaults */
394 0x60, /* interframe spacing */
395 0x00, /* slot time LSB */
396 0xf2, /* slot time and retries */
397 0x00, /* promiscuous mode */
398 0x00, /* collision detect */
399 0x40, /* minimum frame length */
402 0x7f /* *multi IA */ };
404 static int i596_open(struct net_device *dev);
405 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
406 static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
407 static int i596_close(struct net_device *dev);
408 static struct net_device_stats *i596_get_stats(struct net_device *dev);
409 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
410 static void i596_tx_timeout (struct net_device *dev);
411 static void print_eth(unsigned char *buf, char *str);
412 static void set_multicast_list(struct net_device *dev);
414 static int rx_ring_size = RX_RING_SIZE;
415 static int ticks_limit = 100;
416 static int max_cmd_backlog = TX_RING_SIZE-1;
/* Assert Channel Attention: a write to the CA offset tells the 82596 to
 * examine the SCB.  The value written (0) is irrelevant; only the access
 * itself matters. */
419 static inline void CA(struct net_device *dev)
421 gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
/* Issue a PORT command to the 82596: 'c' is a PORT_* opcode, 'x' the DMA
 * address argument; both are combined into one 32-bit value that is written
 * to the CPU port register in two halves (most significant word first, per
 * the comment above the PORT_* defines).
 * NOTE(review): the lines deriving 'a' and 'b' from 'v' (and the
 * OPT_SWAP_PORT word-swap branch body) are not visible in this listing. */
425 static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
427 struct i596_private *lp = dev->priv;
429 u32 v = (u32) (c) | (u32) (x);
/* Word-swap the port value when the board requires it (OPT_SWAP_PORT). */
432 if (lp->options & OPT_SWAP_PORT) {
440 gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
442 gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
/* Poll iscp.stat until the chip clears it (init handshake) or 'delcnt'
 * iterations expire.  The ISCP lives in DMA-visible memory, so the cache
 * line must be invalidated (CHECK_INV) before every re-read.  Logs 'str'
 * with the stuck status value on timeout. */
446 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
448 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
449 while (--delcnt && lp->iscp.stat) {
451 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
/* Timeout path: report which caller ('str') saw iscp.stat stay set. */
454 printk("%s: %s, iscp.stat %04x, didn't clear\n",
455 dev->name, str, lp->iscp.stat);
/* Poll scb.command until the 82596 acknowledges it by clearing the field,
 * or 'delcnt' iterations expire.  Cache-invalidate before each read since
 * the chip updates the SCB via DMA.  Logs 'str' plus status on timeout. */
463 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
465 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
466 while (--delcnt && lp->scb.command) {
468 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
/* Timeout path: dump SCB status and the command that was never accepted. */
471 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
472 dev->name, str, lp->scb.status, lp->scb.command);
/* Debug helper: dump the SCP/ISCP/SCB, the pending command chain, and the
 * RFD/RBD rings to the console.  Read-only except for the trailing cache
 * invalidate.  NOTE(review): the initialisation of 'cmd'/'rfd'/'rbd' before
 * their loops is not visible in this listing. */
480 static void i596_display_data(struct net_device *dev)
482 struct i596_private *lp = dev->priv;
483 struct i596_cmd *cmd;
484 struct i596_rfd *rfd;
485 struct i596_rbd *rbd;
487 printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
488 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
489 printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
490 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
491 printk("scb at %p, scb.status = %04x, .command = %04x,"
492 " .cmd = %08x, .rfd = %08x\n",
493 &lp->scb, lp->scb.status, lp->scb.command,
494 lp->scb.cmd, lp->scb.rfd);
495 printk(" errors: crc %x, align %x, resource %x,"
496 " over %x, rcvdt %x, short %x\n",
497 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
498 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
/* Walk and print the outstanding command list. */
500 while (cmd != NULL) {
501 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
502 cmd, cmd->status, cmd->command, cmd->b_next);
/* Walk and print the receive-frame-descriptor ring. */
506 printk("rfd_head = %p\n", rfd);
508 printk(" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
510 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
513 } while (rfd != lp->rfd_head);
/* Walk and print the receive-buffer-descriptor ring. */
515 printk("rbd_head = %p\n", rbd);
517 printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
518 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
520 } while (rbd != lp->rbd_head);
/* Re-invalidate the whole shared area after touching it. */
521 CHECK_INV(lp, sizeof(struct i596_private));
/* Error-interrupt handler, only built for the 680x0 VME boards.  Reports
 * the condition and dumps driver state.  The hard-coded 0xfff42000 is the
 * MVME16x PCC2 chip base address — presumably used by lines not visible in
 * this listing (TODO confirm against the full source). */
525 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
526 static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
528 struct net_device *dev = dev_id;
529 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
533 printk("%s: Error interrupt\n", dev->name);
534 i596_display_data(dev);
538 #define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
/* Allocate an sk_buff for every receive buffer descriptor, DMA-map each
 * buffer, and link the RBDs and RFDs into two circular lists.  The last
 * RFD gets CMD_EOL so the 82596 stops there; the first RFD is pointed at
 * the RBD ring head.  Writes back and invalidates the whole shared area
 * at the end so the chip sees a consistent picture.
 * NOTE(review): panics on allocation failure — acceptable only at open()
 * time in this historical driver. */
540 static inline void init_rx_bufs(struct net_device *dev)
542 struct i596_private *lp = dev->priv;
544 struct i596_rfd *rfd;
545 struct i596_rbd *rbd;
547 /* First build the Receive Buffer Descriptor List */
549 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
/* +4 slack bytes on top of the 1536-byte packet buffer. */
551 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
554 panic("%s: alloc_skb() failed", __FILE__);
556 dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
/* b_next/b_addr are what the 82596 dereferences: bus addresses,
 * word-swapped as the chip expects. */
560 rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
561 rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
563 rbd->v_data = skb->tail;
564 rbd->b_data = WSWAPchar(dma_addr);
565 rbd->size = PKT_BUF_SZ;
/* Close the RBD ring: last entry points back to the first. */
567 lp->rbd_head = lp->rbds;
568 rbd = lp->rbds + rx_ring_size - 1;
569 rbd->v_next = lp->rbds;
570 rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
572 /* Now build the Receive Frame Descriptor List */
574 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
575 rfd->rbd = I596_NULL;
578 rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
/* Head of the RFD ring becomes the SCB receive area; only the first
 * RFD references the RBD chain (flexible memory model). */
581 lp->rfd_head = lp->rfds;
582 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
584 rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
585 rfd->v_prev = lp->rfds + rx_ring_size - 1;
/* Close the RFD ring and mark its tail as end-of-list. */
586 rfd = lp->rfds + rx_ring_size - 1;
587 rfd->v_next = lp->rfds;
588 rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
589 rfd->cmd = CMD_EOL|CMD_FLEX;
/* Flush everything to memory before the chip starts DMAing. */
591 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
/* Undo init_rx_bufs(): for every RBD that holds an sk_buff, unmap its DMA
 * buffer and free the skb.  Entries with a NULL skb are skipped (the
 * skip statement itself is on a line not visible in this listing). */
594 static inline void remove_rx_bufs(struct net_device *dev)
596 struct i596_private *lp = dev->priv;
597 struct i596_rbd *rbd;
600 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
601 if (rbd->skb == NULL)
/* b_data was stored word-swapped; swap back to get the dma_addr_t. */
603 dma_unmap_single(lp->dev,
604 (dma_addr_t)WSWAPchar(rbd->b_data),
605 PKT_BUF_SZ, DMA_FROM_DEVICE);
606 dev_kfree_skb(rbd->skb);
/* Reset the RFD ring to its pristine state without touching the skbs:
 * detach every RFD from the RBD chain, re-mark the tail as end-of-list,
 * rewind both ring heads, and reattach the RBD chain to RFD 0.  Used
 * after a receiver restart.  Flushes the shared area when done. */
611 static void rebuild_rx_bufs(struct net_device *dev)
613 struct i596_private *lp = dev->priv;
616 /* Ensure rx frame/buffer descriptors are tidy */
618 for (i = 0; i < rx_ring_size; i++) {
619 lp->rfds[i].rbd = I596_NULL;
620 lp->rfds[i].cmd = CMD_FLEX;
/* Tail RFD stops the chip; heads rewind to slot 0. */
622 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
623 lp->rfd_head = lp->rfds;
624 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
625 lp->rbd_head = lp->rbds;
626 lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
628 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
/* Full chip (re)initialisation: hard-reset the 82596, hand it the SCP via
 * a PORT_ALTSCP command, wait for the ISCP-busy handshake, rebuild the rx
 * rings, then queue Configure / IA-Setup / TDR commands and start the
 * receive unit.  Returns 0 on success; the failure path (below the
 * RX_START wait) resets the chip via PORT_RESET.
 * NOTE(review): several statements (CA() kicks, returns, label lines) fall
 * in gaps of this listing. */
632 static int init_i596_mem(struct net_device *dev)
634 struct i596_private *lp = dev->priv;
/* Keep the LAN IRQ off while the chip is in an undefined state. */
637 disable_irq(dev->irq); /* disable IRQs from LAN */
639 printk("RESET 82596 port: %p (with IRQ %d disabled)\n",
640 (void*)(dev->base_addr + PA_I82596_RESET),
643 gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
644 udelay(100); /* Wait 100us - seems to help */
646 /* change the scp address */
648 lp->last_cmd = jiffies;
/* SCP sysbus value 0x6c selects the bus mode HP expects. */
651 lp->scp.sysbus = 0x0000006c;
652 lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
653 lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
/* Chip clears ISCP_BUSY once it has read the SCB pointer. */
654 lp->iscp.stat = ISCP_BUSY;
658 lp->scb.cmd = I596_NULL;
660 DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
/* Flush SCP/ISCP before telling the chip where they are. */
662 CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
663 CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
665 MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
669 if (wait_istat(dev, lp, 1000, "initialization timed out"))
671 DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));
673 /* Ensure rx frame/buffer descriptors are tidy */
674 rebuild_rx_bufs(dev);
677 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
679 enable_irq(dev->irq); /* enable IRQs from LAN */
/* Queue the three bring-up commands; each is flushed before queueing
 * since i596_add_cmd hands its bus address to the chip. */
681 DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
682 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
683 lp->cf_cmd.cmd.command = CmdConfigure;
684 CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
685 i596_add_cmd(dev, &lp->cf_cmd.cmd);
687 DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
688 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
689 lp->sa_cmd.cmd.command = CmdSASetup;
690 CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
691 i596_add_cmd(dev, &lp->sa_cmd.cmd);
693 DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
694 lp->tdr_cmd.cmd.command = CmdTDR;
695 CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
696 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
/* Start the receive unit under the lock, only once the SCB is free. */
698 spin_lock_irqsave (&lp->lock, flags);
700 if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
701 spin_unlock_irqrestore (&lp->lock, flags);
704 DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
705 lp->scb.command = RX_START;
706 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
707 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
711 spin_unlock_irqrestore (&lp->lock, flags);
713 if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
715 DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));
/* Failure path: give up and leave the chip held in reset. */
720 printk("%s: Failed to initialise 82596\n", dev->name);
721 MPU_PORT(dev, PORT_RESET, 0);
/* Receive path: walk the RFD ring from rfd_head, consuming every frame the
 * chip has marked complete (STAT_C).  Good frames are passed up either by
 * handing the ring skb to the stack and replacing it (len > rx_copybreak)
 * or by copying into a freshly sized skb.  Bad frames bump the error
 * counters decoded from rfd->stat bits.  Each consumed RFD is re-marked
 * CMD_EOL and the previous end-of-list mark is removed, advancing the
 * ring.  Returns after the debug frame count (return statement not
 * visible in this listing). */
726 static inline int i596_rx(struct net_device *dev)
728 struct i596_private *lp = dev->priv;
729 struct i596_rfd *rfd;
730 struct i596_rbd *rbd;
733 DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
734 lp->rfd_head, lp->rbd_head));
737 rfd = lp->rfd_head; /* Ref next frame to check */
739 CHECK_INV(rfd, sizeof(struct i596_rfd));
740 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
/* Resolve which RBD (if any) holds this frame's data; the RFD stores
 * the chip-view (word-swapped bus) address of the RBD. */
741 if (rfd->rbd == I596_NULL)
743 else if (rfd->rbd == lp->rbd_head->b_addr) {
745 CHECK_INV(rbd, sizeof(struct i596_rbd));
748 printk("%s: rbd chain broken!\n", dev->name);
752 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
753 rfd, rfd->rbd, rfd->stat));
755 if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
/* Good frame: low 14 bits of count are the actual byte length. */
757 int pkt_len = rbd->count & 0x3fff;
758 struct sk_buff *skb = rbd->skb;
761 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
764 /* Check if the packet is long enough to just accept
765 * without copying to a properly sized skbuff.
768 if (pkt_len > rx_copybreak) {
769 struct sk_buff *newskb;
772 dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
773 /* Get fresh skbuff to replace filled one. */
774 newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
775 if (newskb == NULL) {
776 skb = NULL; /* drop pkt */
/* 2-byte reserve aligns the IP header on a 16-byte boundary. */
779 skb_reserve(newskb, 2);
781 /* Pass up the skb already on the Rx ring. */
782 skb_put(skb, pkt_len);
/* Re-arm the RBD with the replacement buffer. */
786 dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
787 rbd->v_data = newskb->tail;
788 rbd->b_data = WSWAPchar(dma_addr);
789 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
/* Small frame: copy into a right-sized skb, leaving the ring
 * buffer in place. */
792 skb = dev_alloc_skb(pkt_len + 2);
795 /* XXX tulip.c can defer packets here!! */
796 printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
797 lp->stats.rx_dropped++;
802 /* 16 byte align the data fields */
803 dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
805 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
806 dma_sync_single_for_device(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
809 skb->protocol=eth_type_trans(skb,dev);
811 dev->last_rx = jiffies;
812 lp->stats.rx_packets++;
813 lp->stats.rx_bytes+=pkt_len;
/* Error frame: decode the status bits into net_device_stats. */
817 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
818 dev->name, rfd->stat));
819 lp->stats.rx_errors++;
820 if ((rfd->stat) & 0x0001)
821 lp->stats.collisions++;
822 if ((rfd->stat) & 0x0080)
823 lp->stats.rx_length_errors++;
824 if ((rfd->stat) & 0x0100)
825 lp->stats.rx_over_errors++;
826 if ((rfd->stat) & 0x0200)
827 lp->stats.rx_fifo_errors++;
828 if ((rfd->stat) & 0x0400)
829 lp->stats.rx_frame_errors++;
830 if ((rfd->stat) & 0x0800)
831 lp->stats.rx_crc_errors++;
832 if ((rfd->stat) & 0x1000)
833 lp->stats.rx_length_errors++;
836 /* Clear the buffer descriptor count and EOF + F flags */
/* 0x4000 is the EOF bit in rbd->count: advance the RBD head only
 * when this RBD actually terminated a frame. */
838 if (rbd != NULL && (rbd->count & 0x4000)) {
840 lp->rbd_head = rbd->v_next;
841 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
844 /* Tidy the frame descriptor, marking it as end of list */
846 rfd->rbd = I596_NULL;
848 rfd->cmd = CMD_EOL|CMD_FLEX;
851 /* Remove end-of-list from old end descriptor */
853 rfd->v_prev->cmd = CMD_FLEX;
855 /* Update record of next frame descriptor to process */
857 lp->scb.rfd = rfd->b_next;
858 lp->rfd_head = rfd->v_next;
/* Flush both descriptors the chip will look at next. */
859 CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
860 CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
862 CHECK_INV(rfd, sizeof(struct i596_rfd));
865 DEB(DEB_RXFRAME, printk("frames %d\n", frames));
/* Abort every command still queued on lp->cmd_head: for CmdTx entries,
 * unmap and free the skb and count the frame as an aborted tx error; all
 * entries get b_next = I596_NULL and are flushed.  Finally wait for the
 * SCB to go idle and clear its command pointer. */
871 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
873 struct i596_cmd *ptr;
875 while (lp->cmd_head != NULL) {
877 lp->cmd_head = ptr->v_next;
/* Only the low 3 bits select the command opcode. */
880 switch ((ptr->command) & 0x7) {
883 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
884 struct sk_buff *skb = tx_cmd->skb;
885 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
889 lp->stats.tx_errors++;
890 lp->stats.tx_aborted_errors++;
893 ptr->b_next = I596_NULL;
894 tx_cmd->cmd.command = 0; /* Mark as free */
899 ptr->b_next = I596_NULL;
901 CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
/* Leave the SCB with no command chained and flushed to memory. */
904 wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
905 lp->scb.cmd = I596_NULL;
906 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
/* Soft reset: stop the queue, issue CUC_ABORT|RX_ABORT via the SCB (under
 * the lock), wait for the shutdown to be acknowledged, discard all queued
 * commands, then restart the queue.  NOTE(review): the CA() kick between
 * the SCB write and the shutdown wait is on a line not visible here. */
910 static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
914 DEB(DEB_RESET, printk("i596_reset\n"));
916 spin_lock_irqsave (&lp->lock, flags);
/* Ensure any in-flight SCB command completes before aborting. */
918 wait_cmd(dev, lp, 100, "i596_reset timed out");
920 netif_stop_queue(dev);
922 /* FIXME: this command might cause an lpmc */
923 lp->scb.command = CUC_ABORT | RX_ABORT;
924 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
927 /* wait for shutdown */
928 wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
929 spin_unlock_irqrestore (&lp->lock, flags);
931 i596_cleanup_cmd(dev,lp);
934 netif_start_queue(dev);
/* Append 'cmd' to the driver's command queue and, when the queue was
 * empty, hand it to the chip with CUC_START.  The EOL|INTR bits are set so
 * the chip stops and interrupts after the (currently) last command.  If
 * the backlog exceeds max_cmd_backlog and the oldest command is older than
 * ticks_limit, the adapter is declared hung and reset. */
939 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
941 struct i596_private *lp = dev->priv;
944 DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
947 cmd->command |= (CMD_EOL | CMD_INTR);
949 cmd->b_next = I596_NULL;
950 CHECK_WBACK(cmd, sizeof(struct i596_cmd));
952 spin_lock_irqsave (&lp->lock, flags);
954 if (lp->cmd_head != NULL) {
/* Non-empty queue: chain after the tail; the chip follows b_next,
 * which must point at the status field of the new command. */
955 lp->cmd_tail->v_next = cmd;
956 lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
957 CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
/* Empty queue: give the SCB the new command and start the CU. */
960 wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
961 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
962 lp->scb.command = CUC_START;
963 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
969 spin_unlock_irqrestore (&lp->lock, flags);
971 if (lp->cmd_backlog > max_cmd_backlog) {
972 unsigned long tickssofar = jiffies - lp->last_cmd;
974 if (tickssofar < ticks_limit)
977 printk("%s: command unit timed out, status resetting.\n", dev->name);
985 /* this function makes a perfectly adequate probe... but we have a
/* Self-test probe: reuse the (page-aligned) scp area as scratch, issue a
 * PORT self-test (opcode 1 == PORT_SELFTEST) pointing at it, then poll up
 * to 1e6 times for the chip to DMA its result into tint[1]. */
987 static int i596_test(struct net_device *dev)
989 struct i596_private *lp = dev->priv;
993 tint = (volatile int *)(&(lp->scp));
994 data = virt_to_dma(lp,tint);
/* Flush the scratch page before the chip writes into it. */
997 CHECK_WBACK(tint,PAGE_SIZE);
999 MPU_PORT(dev, 1, data);
1001 for(data = 1000000; data; data--) {
1002 CHECK_INV(tint,PAGE_SIZE);
1008 printk("i596_test result %d\n", tint[1]);
/* net_device open(): claim the IRQ, (init_rx_bufs is called on a line not
 * visible here), initialise the chip, and start the tx queue.  On chip
 * init failure, unwind by freeing the rx buffers and the IRQ. */
1014 static int i596_open(struct net_device *dev)
1016 DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
1018 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1019 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
1025 if (init_i596_mem(dev)) {
1026 printk("%s: Failed to init memory\n", dev->name);
1027 goto out_remove_rx_bufs;
1030 netif_start_queue(dev);
/* Error unwind labels (label lines fall in listing gaps). */
1035 remove_rx_bufs(dev);
1036 free_irq(dev->irq, dev);
/* Watchdog callback: if no packet has been transmitted since the last
 * restart, do a full reset; otherwise just kick the chip with
 * CUC_START|RX_START.  Always restamps trans_start and wakes the queue. */
1041 static void i596_tx_timeout (struct net_device *dev)
1043 struct i596_private *lp = dev->priv;
1045 /* Transmitter timeout, serious problems. */
1046 DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
1049 lp->stats.tx_errors++;
1051 /* Try to restart the adaptor */
1052 if (lp->last_restart == lp->stats.tx_packets) {
1053 DEB(DEB_ERRORS, printk("Resetting board.\n"));
1054 /* Shutdown and restart */
1055 i596_reset (dev, lp);
1057 /* Issue a channel attention signal */
1058 DEB(DEB_ERRORS, printk("Kicking board.\n"));
1059 lp->scb.command = CUC_START | RX_START;
1060 CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
/* Remember progress so the next timeout escalates to a reset. */
1062 lp->last_restart = lp->stats.tx_packets;
1065 dev->trans_start = jiffies;
1066 netif_wake_queue (dev);
/* hard_start_xmit: pad short frames to ETH_ZLEN, claim the next tx_cmd/tbd
 * ring slot (drop the packet if the slot's command field shows it busy),
 * DMA-map the skb data, fill in the CmdTx command + TBD, and queue it via
 * i596_add_cmd().  The queue is stopped around the ring manipulation and
 * restarted at the end. */
1070 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1072 struct i596_private *lp = dev->priv;
1073 struct tx_cmd *tx_cmd;
1074 struct i596_tbd *tbd;
1075 short length = skb->len;
1076 dev->trans_start = jiffies;
1078 DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
1079 skb->len, skb->data));
/* Pad undersized frames; skb_padto may return a new (or NULL) skb. */
1081 if (length < ETH_ZLEN) {
1082 skb = skb_padto(skb, ETH_ZLEN);
1088 netif_stop_queue(dev);
1090 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1091 tbd = lp->tbds + lp->next_tx_cmd;
/* A non-zero command field means the chip hasn't finished this slot. */
1093 if (tx_cmd->cmd.command) {
1094 DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
1096 lp->stats.tx_dropped++;
1100 if (++lp->next_tx_cmd == TX_RING_SIZE)
1101 lp->next_tx_cmd = 0;
1102 tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
1103 tbd->next = I596_NULL;
1105 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
/* EOF marks this TBD as the last buffer of the frame. */
1111 tbd->size = EOF | length;
1113 tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
1115 tbd->data = WSWAPchar(tx_cmd->dma_addr);
1117 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
/* Flush descriptor and command before the chip can fetch them. */
1118 CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
1119 CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
1120 i596_add_cmd(dev, &tx_cmd->cmd);
1122 lp->stats.tx_packets++;
1123 lp->stats.tx_bytes += length;
1126 netif_start_queue(dev);
/* Debug helper: print an Ethernet header from raw bytes 'add' — source MAC
 * (bytes 6..11) first, then destination MAC (bytes 0..5), then the
 * ethertype (bytes 12..13) — tagged with caller-supplied label 'str'. */
1131 static void print_eth(unsigned char *add, char *str)
1135 printk("i596 0x%p, ", add);
1136 for (i = 0; i < 6; i++)
1137 printk(" %02X", add[i + 6]);
1139 for (i = 0; i < 6; i++)
1140 printk(" %02X", add[i]);
1141 printk(" %02X%02X, %s\n", add[12], add[13], str);
1145 #define LAN_PROM_ADDR 0xF0810000
/* Probe/attach: sanity-check that the descriptor structs really are
 * cache-line sized (32-byte multiples) and that the whole private area
 * fits in one page, read the MAC from PDC (falling back to the LAN PROM),
 * allocate non-coherent DMA memory for struct i596_private, wire up the
 * net_device ops, and register the device.  Frees the DMA area if
 * register_netdev fails. */
1147 static int __devinit i82596_probe(struct net_device *dev,
1148 struct device *gen_dev)
1151 struct i596_private *lp;
1153 dma_addr_t dma_addr;
1155 /* This lot is ensure things have been cache line aligned. */
1156 if (sizeof(struct i596_rfd) != 32) {
1157 printk("82596: sizeof(struct i596_rfd) = %d\n",
1158 (int)sizeof(struct i596_rfd));
1161 if ((sizeof(struct i596_rbd) % 32) != 0) {
1162 printk("82596: sizeof(struct i596_rbd) = %d\n",
1163 (int)sizeof(struct i596_rbd));
1166 if ((sizeof(struct tx_cmd) % 32) != 0) {
1167 printk("82596: sizeof(struct tx_cmd) = %d\n",
1168 (int)sizeof(struct tx_cmd));
1171 if (sizeof(struct i596_tbd) != 32) {
1172 printk("82596: sizeof(struct i596_tbd) = %d\n",
1173 (int)sizeof(struct i596_tbd));
/* The shared area must fit in a single page (see comment at the
 * TX_RING_SIZE/RX_RING_SIZE definitions). */
1177 if (sizeof(struct i596_private) > 4096) {
1178 printk("82596: sizeof(struct i596_private) = %d\n",
1179 (int)sizeof(struct i596_private));
1184 if (!dev->base_addr || !dev->irq)
/* MAC address: PDC firmware first, LAN PROM bytes as fallback. */
1187 if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
1188 for (i=0; i < 6; i++) {
1189 eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
1191 printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
/* Non-coherent DMA memory: the driver does explicit cache flushing
 * (CHECK_WBACK/CHECK_INV) instead of relying on coherency. */
1194 dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
1195 sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
1196 if (!dev->mem_start) {
1197 printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
1201 for (i = 0; i < 6; i++)
1202 dev->dev_addr[i] = eth_addr[i];
1204 /* The 82596-specific entries in the device structure. */
1205 dev->open = i596_open;
1206 dev->stop = i596_close;
1207 dev->hard_start_xmit = i596_start_xmit;
1208 dev->get_stats = i596_get_stats;
1209 dev->set_multicast_list = set_multicast_list;
1210 dev->tx_timeout = i596_tx_timeout;
1211 dev->watchdog_timeo = TX_TIMEOUT;
1213 dev->priv = (void *)(dev->mem_start);
1216 memset(lp, 0, sizeof(struct i596_private));
1218 lp->scb.command = 0;
1219 lp->scb.cmd = I596_NULL;
1220 lp->scb.rfd = I596_NULL;
1221 spin_lock_init(&lp->lock);
1222 lp->dma_addr = dma_addr;
1225 CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
1227 i = register_netdev(dev);
/* Registration failed: release the shared DMA area. */
1230 dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1231 (void *)dev->mem_start, lp->dma_addr);
1235 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1236 for (i = 0; i < 6; i++)
1237 DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
1238 DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
1239 DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
1240 dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));
/* Interrupt handler for the 82596.  Reads the SCB status word, reaps
 * completed commands off the driver's command queue (TX completions and
 * TDR results), restarts the command and/or receive unit when the chip
 * reports them inactive, and finally writes the event bits back to the
 * SCB to acknowledge the interrupt.
 *
 * NOTE(review): this is an elided view of the file -- statements are
 * missing between some of the visible lines below (brace/else/return
 * lines, some assignments).  Comments describe only the code visible
 * here; confirm against the full source before relying on them. */
1246 static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1248 	struct net_device *dev = dev_id;
1249 	struct i596_private *lp;
1250 	unsigned short status, ack_cmd = 0;
/* Spurious interrupt: no net_device bound to this IRQ. */
1253 	printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
/* Serialise SCB access against the transmit and ioctl paths. */
1259 	spin_lock (&lp->lock);
/* Wait for the chip to finish consuming any previous SCB command
 * before reading status. */
1261 	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
1262 	status = lp->scb.status;
1264 	DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1265 			dev->name, irq, status));
/* Top four status bits are the event bits; writing them back to
 * scb.command later acknowledges the events to the chip. */
1267 	ack_cmd = status & 0xf000;
/* No event bits set: nothing to do, drop the lock and bail. */
1270 		DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
1271 		spin_unlock (&lp->lock);
/* 0x8000 = command completed, 0x2000 = command unit became inactive:
 * walk the queue of issued commands and retire the finished ones. */
1275 	if ((status & 0x8000) || (status & 0x2000)) {
1276 		struct i596_cmd *ptr;
1278 		if ((status & 0x8000))
1279 			DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
1280 		if ((status & 0x2000))
1281 			DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1283 		while (lp->cmd_head != NULL) {
/* Invalidate cache before inspecting a descriptor the chip wrote. */
1284 			CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
/* STAT_C clear => command not yet completed; stop reaping here. */
1285 			if (!(lp->cmd_head->status & STAT_C))
1290 			DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
1291 				       lp->cmd_head->status, lp->cmd_head->command));
1292 			lp->cmd_head = ptr->v_next;
/* Low three command bits select the command type (tx/tdr/...). */
1295 			switch ((ptr->command) & 0x7) {
1298 					struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1299 					struct sk_buff *skb = tx_cmd->skb;
1301 					if ((ptr->status) & STAT_OK) {
1302 						DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
/* TX failed: decode the individual error bits into statistics. */
1304 						lp->stats.tx_errors++;
1305 						if ((ptr->status) & 0x0020)
1306 							lp->stats.collisions++;
1307 						if (!((ptr->status) & 0x0040))
1308 							lp->stats.tx_heartbeat_errors++;
1309 						if ((ptr->status) & 0x0400)
1310 							lp->stats.tx_carrier_errors++;
1311 						if ((ptr->status) & 0x0800)
1312 							lp->stats.collisions++;
1313 						if ((ptr->status) & 0x1000)
1314 							lp->stats.tx_aborted_errors++;
/* Release the DMA mapping and the skb now that the chip is done. */
1316 					dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
1317 					dev_kfree_skb_irq(skb);
1319 					tx_cmd->cmd.command = 0; /* Mark free */
/* TDR (time-domain reflectometry) result: report cable health. */
1324 					unsigned short status = ((struct tdr_cmd *)ptr)->status;
1326 					if (status & 0x8000) {
1327 						DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
1329 						if (status & 0x4000)
1330 							printk("%s: Transceiver problem.\n", dev->name);
1331 						if (status & 0x2000)
1332 							printk("%s: Termination problem.\n", dev->name);
1333 						if (status & 0x1000)
1334 							printk("%s: Short circuit.\n", dev->name);
1336 						DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
1341 			/* Zap command so set_multicast_list() knows it is free */
1346 			ptr->b_next = I596_NULL;
/* Write the freed descriptor back so the chip sees the update. */
1347 			CHECK_WBACK(ptr, sizeof(struct i596_cmd));
1348 			lp->last_cmd = jiffies;
1351 		/* This mess is arranging that only the last of any outstanding
1352 		 * commands has the interrupt bit set.  Should probably really
1353 		 * only add to the cmd queue when the CU is stopped.
1356 		while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1357 			struct i596_cmd *prev = ptr;
/* Strip the interrupt/suspend bits from non-tail commands. */
1359 			ptr->command &= 0x1fff;
1361 			CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
/* Commands still pending: point the SCB at the new head and ask the
 * chip to restart the command unit along with the event ack. */
1364 		if ((lp->cmd_head != NULL))
1365 			ack_cmd |= CUC_START;
1366 		lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
1367 		CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
/* 0x4000 = frame received, 0x1000 = receive unit not ready. */
1369 	if ((status & 0x1000) || (status & 0x4000)) {
1370 		if ((status & 0x4000))
1371 			DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
1373 		/* Only RX_START if stopped - RGH 07-07-96 */
1374 		if (status & 0x1000) {
1375 			if (netif_running(dev)) {
/* RU stopped while the interface is up: count it as an RX FIFO
 * overrun, rebuild the RX ring and ask for an RU restart. */
1376 				DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1377 				ack_cmd |= RX_START;
1378 				lp->stats.rx_errors++;
1379 				lp->stats.rx_fifo_errors++;
1380 				rebuild_rx_bufs(dev);
/* Ack the events (and any CUC_START/RX_START) back to the chip. */
1384 	wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
1385 	lp->scb.command = ack_cmd;
1386 	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
1388 	/* DANGER: I suspect that some kind of interrupt
1389 	 acknowledgement aside from acking the 82596 might be needed
1390 	 here... but it's running acceptably without */
1394 	wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
1395 	DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));
1397 	spin_unlock (&lp->lock);
/* net_device close (ifdown) hook: stop the queue, abort the chip's
 * command and receive units, dump debug state, then release the IRQ
 * and receive buffers.
 * NOTE(review): elided view -- some intervening lines (CUC_START write,
 * return statement, braces) are missing here. */
1401 static int i596_close(struct net_device *dev)
1403 	struct i596_private *lp = dev->priv;
1404 	unsigned long flags;
/* Stop the stack from handing us more packets before shutdown. */
1406 	netif_stop_queue(dev);
1408 	DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
1409 		       dev->name, lp->scb.status));
/* IRQ-safe lock: the interrupt handler also touches the SCB. */
1411 	spin_lock_irqsave(&lp->lock, flags);
1413 	wait_cmd(dev, lp, 100, "close1 timed out");
/* Ask the chip to abort both the command unit and the receive unit. */
1414 	lp->scb.command = CUC_ABORT | RX_ABORT;
1415 	CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
1419 	wait_cmd(dev, lp, 100, "close2 timed out");
1420 	spin_unlock_irqrestore(&lp->lock, flags);
1421 	DEB(DEB_STRUCT,i596_display_data(dev));
/* Free any commands still sitting on the queue. */
1422 	i596_cleanup_cmd(dev,lp);
1424 	disable_irq(dev->irq);
1426 	free_irq(dev->irq, dev);
/* Tear down the receive buffer ring allocated at open() time. */
1427 	remove_rx_bufs(dev);
/* net_device get_stats hook.  NOTE(review): elided view -- the return
 * statement is not visible here; presumably returns &lp->stats
 * (confirm against full source). */
1432 static struct net_device_stats *
1433 i596_get_stats(struct net_device *dev)
1435 	struct i596_private *lp = dev->priv;
1441 * Set or clear the multicast filter for this adaptor.
/* net_device set_multicast_list hook: reprogram the 82596's promiscuous
 * and multicast filtering to match dev->flags / dev->mc_list.  Config
 * byte changes are queued as a CmdConfigure; the address list itself is
 * queued as a CmdMulticastList.
 * NOTE(review): elided view -- several lines (config flag setting,
 * mc_cmd/cp initialisation, braces) are missing between the visible
 * statements. */
1444 static void set_multicast_list(struct net_device *dev)
1446 	struct i596_private *lp = dev->priv;
1447 	int config = 0, cnt;
1449 	DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1450 		dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
1451 		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
/* config byte 8 bit 0x01 = promiscuous mode; only touch the config
 * block when the desired state differs from the current one. */
1453 	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1454 		lp->cf_cmd.i596_config[8] |= 0x01;
1457 	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1458 		lp->cf_cmd.i596_config[8] &= ~0x01;
/* config byte 11 bit 0x20 set = multicast-all reception disabled, so
 * it is cleared for ALLMULTI and set otherwise. */
1461 	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1462 		lp->cf_cmd.i596_config[11] &= ~0x20;
1465 	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1466 		lp->cf_cmd.i596_config[11] |= 0x20;
/* A non-zero cmd.command means the previous configure request has not
 * been reaped by the interrupt handler yet. */
1470 		if (lp->cf_cmd.cmd.command)
1471 			printk("%s: config change request already queued\n",
1474 			lp->cf_cmd.cmd.command = CmdConfigure;
1475 			CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
1476 			i596_add_cmd(dev, &lp->cf_cmd.cmd);
/* Clamp the multicast list to what the hardware command supports. */
1480 	cnt = dev->mc_count;
1481 	if (cnt > MAX_MC_CNT) {
1484 		printk("%s: Only %d multicast addresses supported",
/* Copy each 6-byte multicast address into the mc_cmd body and queue
 * a CmdMulticastList for the chip. */
1488 	if (dev->mc_count > 0) {
1489 		struct dev_mc_list *dmi;
1494 		cmd->cmd.command = CmdMulticastList;
1495 		cmd->mc_cnt = dev->mc_count * 6;
1497 		for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1498 			memcpy(cp, dmi->dmi_addr, 6);
1500 			DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1501 				dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
/* Flush the freshly-written address list before handing it to DMA. */
1503 		CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
1504 		i596_add_cmd(dev, &cmd->cmd);
/* Module parameter: debug mask selecting which DEB() categories print.
 * -1 means "use the driver's default".  NOTE(review): MODULE_PARM
 * appears before the variable definition -- old-style MODULE_PARM only
 * emits .modinfo strings, so this ordering is presumably intentional;
 * confirm against the kernel version this targets. */
1508 MODULE_PARM(debug, "i");
1509 MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
1510 static int debug = -1;
/* Bookkeeping for all probed devices so the exit path can unwind. */
1512 static int num_drivers;
1513 static struct net_device *netdevs[MAX_DRIVERS];
/* PA-RISC bus probe callback: allocate and register one net_device for
 * an i82596 found at dev->hpa.  Records the device in netdevs[] for
 * module unload.  NOTE(review): elided view -- error-return statements
 * and some braces are missing between the visible lines. */
1515 static int __devinit
1516 lan_init_chip(struct parisc_device *dev)
1518 	struct net_device *netdevice;
1521 	if (num_drivers >= MAX_DRIVERS) {
1522 		/* max count of possible i82596 drivers reached */
/* Print the driver banner exactly once, on the first probe. */
1526 	if (num_drivers == 0)
1527 		printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
1530 		printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
1531 			__FILE__, dev->hpa);
1535 	printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);
/* sizeof_priv == 0: the private area is DMA-allocated separately. */
1537 	netdevice = alloc_etherdev(0);
1541 	netdevice->base_addr = dev->hpa;
1542 	netdevice->irq = dev->irq;
1544 	retval = i82596_probe(netdevice, &dev->dev);
/* Probe failed: release the allocated net_device. */
1546 		free_netdev(netdevice);
/* sversion 0x72 identifies the ASP/Cobra variant that needs the
 * byte-swapped port accesses. */
1550 	if (dev->id.sversion == 0x72) {
1551 		((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
1554 	netdevs[num_drivers++] = netdevice;
/* PA-RISC device IDs this driver binds to: sversion 0x8a (LASI LAN)
 * and 0x72 (ASP/Cobra LAN).  NOTE(review): the terminating sentinel
 * entry of the table is not visible in this elided view. */
1560 static struct parisc_device_id lan_tbl[] = {
1561 	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
1562 	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
1566 MODULE_DEVICE_TABLE(parisc, lan_tbl);
/* Driver registration record tying the ID table to the probe hook.
 * NOTE(review): the .name member is not visible in this elided view. */
1568 static struct parisc_driver lan_driver = {
1570 	.id_table	= lan_tbl,
1571 	.probe		= lan_init_chip,
/* Module init: register with the PA-RISC bus core, which will invoke
 * lan_init_chip() for each matching device. */
1574 static int __devinit lasi_82596_init(void)
1578 	return register_parisc_driver(&lan_driver);
1581 module_init(lasi_82596_init);
/* Module exit: unregister every net_device recorded in netdevs[], free
 * its DMA-allocated private area and the net_device itself, then drop
 * the bus driver registration.  NOTE(review): elided view -- the
 * NULL-entry skip inside the loop is not visible here. */
1583 static void __exit lasi_82596_exit(void)
1587 	for (i=0; i<MAX_DRIVERS; i++) {
1588 		struct i596_private *lp;
1589 		struct net_device *netdevice;
1591 		netdevice = netdevs[i];
1595 		unregister_netdev(netdevice);
/* The private struct was allocated with dma_alloc_noncoherent and its
 * CPU address stashed in mem_start; free it the matching way. */
1597 		lp = netdevice->priv;
1598 		dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1599 				     (void *)netdevice->mem_start, lp->dma_addr);
1600 		free_netdev(netdevice);
1604 	unregister_parisc_driver(&lan_driver);
1607 module_exit(lasi_82596_exit);